// cpuid.h -- x86 CPUID feature, vendor and cache-topology detection
#ifndef CPUID_H
#define CPUID_H
#include <string.h>
struct cpuid_reg { unsigned int eax,ebx,ecx,edx; };
#if (defined(__ICL) || defined(_MSC_VER)) && defined(WIN32)
// MSVC / Intel C++ on Win32: execute CPUID via 32-bit _asm inline assembly.
// a goes to EAX (leaf), c to ECX (sub-leaf); all four result registers
// are captured into a cpuid_reg.  NOTE(review): _asm is 32-bit only --
// this branch will not build with the x64 MSVC compiler.
inline cpuid_reg _cpuid(unsigned int a, unsigned int c=0)
{
cpuid_reg r;
// The SEH guard below was disabled; it would return zeros instead of
// faulting on pre-CPUID (486-era) processors.
//__try
//{
_asm
{
mov eax, a
mov ecx, c
cpuid
mov r.eax, eax
mov r.ebx, ebx
mov r.ecx, ecx
mov r.edx, edx
}
//} __except (EXCEPTION_EXECUTE_HANDLER) { r.eax=r.ebx=r.ecx=r.edx=0; }
return r;
}
#elif defined(__GNUC__) && (defined(i386)||defined(__x86_64__))
// GCC/Clang on x86 / x86-64: CPUID via extended inline asm.
// Leaf in EAX, sub-leaf in ECX; all four registers come back as outputs.
// NOTE(review): on 32-bit -fPIC builds EBX is the GOT register, and some
// older GCCs reject "=b" there -- confirm if 32-bit PIC is a target.
inline cpuid_reg _cpuid(unsigned int a, unsigned int c=0)
{
cpuid_reg r;
__asm__("cpuid":"=a"(r.eax),"=b"(r.ebx),"=c"(r.ecx),"=d"(r.edx):"a"(a),"c"(c));
return r;
}
#else
// Unknown compiler/architecture: CPUID is unavailable, so report
// all-zero registers -- callers then see every feature as absent.
inline cpuid_reg _cpuid(unsigned int a, unsigned int c=0)
{
(void)a;
(void)c;
cpuid_reg none = {0,0,0,0};
return none;
}
#endif
// CPUID for standard leaves.  Returns all zeros when the requested leaf
// exceeds the CPU's highest supported standard leaf (CPUID.0:EAX).
inline cpuid_reg cpuid(unsigned int a, unsigned int c=0)
{
const unsigned int max_leaf = _cpuid(0).eax;
if (a > max_leaf)
{
cpuid_reg none = {0,0,0,0};
return none;
}
return _cpuid(a, c);
}
// CPUID for extended leaves: `a` is offset into the 0x80000000 range
// (e.g. a=1 queries leaf 0x80000001).  Returns all zeros when the leaf
// exceeds the highest supported extended leaf (CPUID.0x80000000:EAX).
inline cpuid_reg cpuid_ext(unsigned int a, unsigned int c=0)
{
a |= 0x80000000;
const unsigned int max_leaf = _cpuid(0x80000000).eax;
if (a > max_leaf)
{
cpuid_reg none = {0,0,0,0};
return none;
}
return _cpuid(a, c);
}
// Scratch buffers for the vendor string (12 chars + NUL) and the 48-byte
// brand string.  NOTE(review): `static` in a header gives every
// translation unit its own private copy of these buffers.
static char _cpu_vendor[13];
static char _cpu_name[48];
// Returns the 12-character vendor string from leaf 0; the bytes live in
// EBX, EDX, ECX (in that order).  Result points at the shared static
// buffer _cpu_vendor and is NUL-terminated.
inline char *cpu_vendor()
{
cpuid_reg reg = cpuid(0);
// memcpy instead of the previous `unsigned int*` cast: writing through
// a type-punned pointer over a char array violates strict aliasing (UB).
memcpy(_cpu_vendor + 0, &reg.ebx, 4);
memcpy(_cpu_vendor + 4, &reg.edx, 4);
memcpy(_cpu_vendor + 8, &reg.ecx, 4);
_cpu_vendor[12] = '\0';
return _cpu_vendor;
}
// Returns the processor brand string (extended leaves 0x80000002..04,
// 16 bytes each) with leading spaces skipped, or NULL if the whole
// 48-byte buffer is spaces.  On CPUs without brand-string support the
// leaves read back as zeros, yielding an empty string.  Result points
// at the shared static buffer _cpu_name.
inline char *cpu_name ()
{
for (int leaf = 0; leaf < 3; ++leaf)
{
cpuid_reg reg = cpuid_ext(2 + leaf);
// memcpy avoids the strict-aliasing violation of the previous
// `unsigned int*` cast over the char buffer.
memcpy(&_cpu_name[16*leaf + 0], &reg.eax, 4);
memcpy(&_cpu_name[16*leaf + 4], &reg.ebx, 4);
memcpy(&_cpu_name[16*leaf + 8], &reg.ecx, 4);
memcpy(&_cpu_name[16*leaf + 12], &reg.edx, 4);
}
for (int i = 0; i < 48; ++i) if (_cpu_name[i] != ' ') return &_cpu_name[i];
return NULL;
}
// Vendor checks compare CPUID.0:EBX against the first four bytes of the
// vendor string, little-endian: "Genu"(ineIntel) / "Auth"(enticAMD).
inline bool intel_cpu () { cpuid_reg reg=cpuid(0); return reg.ebx==0x756E6547; }
inline bool amd_cpu () { cpuid_reg reg=cpuid(0); return reg.ebx==0x68747541; }
// Standard feature flags: CPUID leaf 1, one bit per feature.
inline bool mmx_cpu () { return (cpuid(1).edx&0x00800000)!=0; } // EDX bit 23: MMX
inline bool fxsr_cpu () { return (cpuid(1).edx&0x01000000)!=0; } // EDX bit 24: FXSAVE/FXRSTOR
inline bool sse_cpu () { return (cpuid(1).edx&0x02000000)!=0; } // EDX bit 25: SSE
inline bool sse2_cpu () { return (cpuid(1).edx&0x04000000)!=0; } // EDX bit 26: SSE2
inline bool htt_cpu () { return (cpuid(1).edx&0x10000000)!=0; } // EDX bit 28: Hyper-Threading capable
inline bool sse3_cpu () { return (cpuid(1).ecx&0x00000001)!=0; } // ECX bit 0: SSE3
inline bool vmx_cpu () { return (cpuid(1).ecx&0x00000020)!=0; } // ECX bit 5: VMX
inline bool eist_cpu () { return (cpuid(1).ecx&0x00000080)!=0; } // ECX bit 7: Enhanced SpeedStep
inline bool tm2_cpu () { return (cpuid(1).ecx&0x00000100)!=0; } // ECX bit 8: Thermal Monitor 2
inline bool ssse3_cpu () { return (cpuid(1).ecx&0x00000200)!=0; } // ECX bit 9: SSSE3
// IA-64 (Itanium emulating x86) is reported in CPUID.1:EDX bit 30.
// The previous code tested ECX bit 30, which is the RDRAND flag on
// modern CPUs and therefore produced false positives.
inline bool ia64_cpu () { return (cpuid(1).edx&0x40000000)!=0; }
inline bool sse41_cpu () { return (cpuid(1).ecx&0x00080000)!=0; } // ECX bit 19: SSE4.1
inline bool sse42_cpu () { return (cpuid(1).ecx&0x00100000)!=0; } // ECX bit 20: SSE4.2
// Extended (AMD) feature flags: CPUID leaf 0x80000001.
inline bool amdlegacy_cpu() { return (cpuid_ext(1).ecx&0x00000002)!=0; } // ECX bit 1: CmpLegacy core counting
inline bool sse5_cpu () { return (cpuid_ext(1).ecx&0x00000800)!=0; } // ECX bit 11: SSE5 (later renamed XOP)
inline bool amdmmx_cpu() { return (cpuid_ext(1).edx&0x00400000)!=0; } // EDX bit 22: AMD MMX extensions
inline bool amd64_cpu () { return (cpuid_ext(1).edx&0x20000000)!=0; } // EDX bit 29: long mode (x86-64/EM64T)
inline bool amd3dnowext_cpu() { return (cpuid_ext(1).edx&0x40000000)!=0; } // EDX bit 30: 3DNow!+
inline bool amd3dnow_cpu () { return (cpuid_ext(1).edx&0x80000000)!=0; } // EDX bit 31: 3DNow!
// 64-bit capable if either the IA-64 or the AMD64/EM64T bit is set.
inline bool x64_cpu() { return ia64_cpu() || amd64_cpu(); }
// Intel only: more than one logical processor per package (CPUID.1:EBX[23:16]).
inline bool ht_cpu () { return (intel_cpu()) ? ((cpuid(1).ebx>>16)&0xFF)>1 : false; }
// Logical processor count per physical package.
// Intel: CPUID.1:EBX[23:16].  AMD: 1 without HTT; legacy parts report
// EBX[23:16]; otherwise CPUID.0x80000008:ECX[7:0]+1.
// The previous version returned unconditionally from the first ternary
// (`: 1` for non-Intel), leaving every AMD branch after it unreachable
// dead code; this restores the chain, matching cpu_cores() below.
inline unsigned int cpu_threads () { if (intel_cpu()) return (cpuid(1).ebx>>16)&0xFF; if (!htt_cpu()) return 1; if (amdlegacy_cpu()) return (cpuid(1).ebx>>16)&0xFF; return (cpuid_ext(8).ecx&0xFF)+1; }
// Physical core count per package.
// Intel: CPUID.4:EAX[31:26]+1 (deterministic cache parameters leaf).
// AMD: 1 without HTT; legacy parts report CPUID.1:EBX[23:16];
// otherwise CPUID.0x80000008:ECX[7:0]+1.
inline unsigned int cpu_cores () { if (intel_cpu()) return ((cpuid(4).eax>>26)&0x03F)+1; if (!htt_cpu()) return 1; if (amdlegacy_cpu()) return (cpuid(1).ebx>>16)&0xFF; return (cpuid_ext(8).ecx&0xFF)+1; }
// Hardware threads per core (SMT width).
inline unsigned int core_threads () { return cpu_threads()/cpu_cores(); }
// Logical processors sharing each cache level: CPUID.4 sub-leaf n,
// EAX[25:14]+1.  NOTE(review): leaf 4 is Intel-specific -- on AMD these
// return meaningless values; verify callers gate on intel_cpu().
inline unsigned int l1_threads () { return ((cpuid(4,1).eax>>14)&0xFFF)+1; }
inline unsigned int l2_threads () { return ((cpuid(4,2).eax>>14)&0xFFF)+1; }
inline unsigned int l3_threads () { return ((cpuid(4,3).eax>>14)&0xFFF)+1; }
// Cache sizes in bytes.
// Intel (CPUID.4 sub-leaf): ways * partitions * line size * sets, each
// field stored minus one.  AMD: L1 data from CPUID.0x80000005:ECX[31:24]
// in KB; L2 from 0x80000006:ECX[31:16] in KB; L3 from
// 0x80000006:EDX[31:18] in 512 KB units.
inline unsigned int l1_size () { if (intel_cpu()) { cpuid_reg reg=cpuid(4,1); return (((reg.ebx>>22)&0x3FF)+1)*(((reg.ebx>>12)&0x3FF)+1)*((reg.ebx&0xFFF)+1)*(reg.ecx+1); } return 1024*(cpuid_ext(5).ecx>>24); }
inline unsigned int l2_size () { if (intel_cpu()) { cpuid_reg reg=cpuid(4,2); return (((reg.ebx>>22)&0x3FF)+1)*(((reg.ebx>>12)&0x3FF)+1)*((reg.ebx&0xFFF)+1)*(reg.ecx+1); } return 1024*(cpuid_ext(6).ecx>>16); }
inline unsigned int l3_size () { if (intel_cpu()) { cpuid_reg reg=cpuid(4,3); return (((reg.ebx>>22)&0x3FF)+1)*(((reg.ebx>>12)&0x3FF)+1)*((reg.ebx&0xFFF)+1)*(reg.ecx+1); } return 512*(cpuid_ext(6).edx>>18); }
#if (defined(__ICL) || defined(_MSC_VER)) && defined(WIN32)
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0403
#endif
#include <windows.h>
#undef min
#undef max
// Number of logical processors visible to Windows (may be fewer than
// the hardware total if affinity/licensing limits apply).
inline int os_threads()
{
SYSTEM_INFO info;
::GetSystemInfo(&info);
return info.dwNumberOfProcessors;
}
#elif (defined(__MACOSX__) || defined(__APPLE__))
#include <Multiprocessing.h>
// Mac OS X: logical processors the Multiprocessing Services scheduler uses.
inline int os_threads() { return MPProcessorsScheduled(); }
#else
// Fallback: no OS query available, report the CPUID-derived thread count.
inline int os_threads() { return cpu_threads(); }
#endif
// OS-visible core count: OS logical processors divided by SMT width.
inline int os_cores() { return os_threads()/core_threads(); }
#endif
//#include "cpuid.h"
//#include <string>
//#include <iostream>
//#include <fstream>
//using namespace std;
//int main()
//{
// ofstream fout((string(cpu_name())+".txt").c_str());
//
// cout << "Name \t" << cpu_name () << endl;
// cout << "Vendor\t" << cpu_vendor() << endl;
// cout << "Intel \t" << intel_cpu() << endl;
// cout << "Amd \t" << amd_cpu () << endl;
// cout << "X64 \t" << x64_cpu () << endl;
// cout << endl;
// cout << "MMX \t" << mmx_cpu () << endl;
// cout << "SSE \t" << sse_cpu () << endl;
// cout << "SSE2 \t" << sse2_cpu () << endl;
// cout << "SSE3 \t" << sse3_cpu () << endl;
// cout << "SSSE3 \t" << ssse3_cpu () << endl;
// cout << "SSE4.1 \t" << sse41_cpu () << endl;
// cout << "SSE4.2 \t" << sse42_cpu () << endl;
// cout << "MMX+ \t" << amdmmx_cpu () << endl;
// cout << "3DNow! \t" << amd3dnow_cpu () << endl;
// cout << "3DNow!+\t" << amd3dnowext_cpu() << endl;
// cout << endl;
// cout << "L1 size\t" << l1_size() << endl;
// cout << "L2 size\t" << l2_size() << endl;
// cout << "L3 size\t" << l3_size() << endl;
// cout << endl;
// cout << "Threads serviced by L1 \t" << l1_threads () << endl;
// cout << "Threads serviced by L2 \t" << l2_threads () << endl;
// cout << "Threads serviced by L3 \t" << l3_threads () << endl;
// cout << endl;
// cout << "Cores per cpu \t" << cpu_cores () << endl;
// cout << "Cores from OS \t" << os_cores () << endl;
// cout << "Threads per cpu \t" << cpu_threads () << endl;
// cout << "Threads from OS \t" << os_threads () << endl;
//}