summaryrefslogtreecommitdiffstats
path: root/chromium/gpu/config/gpu_info.h
blob: fe4903ebb81e19f5020f24e61a7f99ed750ef80d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_CONFIG_GPU_INFO_H_
#define GPU_CONFIG_GPU_INFO_H_

// Provides access to the GPU information for the system
// on which chrome is currently running.

#include <stdint.h>

#include <string>
#include <vector>

#include "base/containers/span.h"
#include "base/optional.h"
#include "base/time/time.h"
#include "base/version.h"
#include "build/build_config.h"
#include "gpu/config/dx_diag_node.h"
#include "gpu/gpu_export.h"
#include "gpu/vulkan/buildflags.h"
#include "ui/gfx/geometry/size.h"

#if defined(OS_WIN)
#include <dxgi.h>
#endif

#if BUILDFLAG(ENABLE_VULKAN)
#include "gpu/config/vulkan_info.h"
#endif

namespace gpu {

// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
// This should match enum IntelGpuSeriesType in
//  \tools\metrics\histograms\enums.xml
// Note: numeric values are NOT in hardware-generation order, because new
// entries must be appended with fresh values to keep logged data stable;
// the generation grouping is expressed by the comments below.
enum class IntelGpuSeriesType {
  kUnknown = 0,
  // Intel 4th gen
  kBroadwater = 16,
  kEaglelake = 17,
  // Intel 5th gen
  kIronlake = 18,
  // Intel 6th gen
  kSandybridge = 1,
  // Intel 7th gen
  kBaytrail = 2,
  kIvybridge = 3,
  kHaswell = 4,
  // Intel 8th gen
  kCherrytrail = 5,
  kBroadwell = 6,
  // Intel 9th gen
  kApollolake = 7,
  kSkylake = 8,
  kGeminilake = 9,
  kAmberlake = 23,
  kKabylake = 10,
  kCoffeelake = 11,
  kWhiskeylake = 12,
  kCometlake = 13,
  // Intel 10th gen
  kCannonlake = 14,
  // Intel 11th gen
  kIcelake = 15,
  kElkhartlake = 19,
  kJasperlake = 20,
  // Intel 12th gen
  kTigerlake = 21,
  kRocketlake = 24,
  kDG1 = 25,
  kAlderlake = 22,
  kAlchemist = 26,
  // Please also update |gpu_series_map| in process_json.py.
  kMaxValue = kAlchemist,
};

// Video profile.  This *must* match media::VideoCodecProfile.
// Values are implicit and sequential starting at 0, so entries must only be
// appended (before VIDEO_CODEC_PROFILE_MAX) and never reordered or removed;
// any addition has to be mirrored in media::VideoCodecProfile.
enum VideoCodecProfile {
  VIDEO_CODEC_PROFILE_UNKNOWN = -1,
  VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
  H264PROFILE_BASELINE = 0,
  H264PROFILE_MAIN,
  H264PROFILE_EXTENDED,
  H264PROFILE_HIGH,
  H264PROFILE_HIGH10PROFILE,
  H264PROFILE_HIGH422PROFILE,
  H264PROFILE_HIGH444PREDICTIVEPROFILE,
  H264PROFILE_SCALABLEBASELINE,
  H264PROFILE_SCALABLEHIGH,
  H264PROFILE_STEREOHIGH,
  H264PROFILE_MULTIVIEWHIGH,
  VP8PROFILE_ANY,
  VP9PROFILE_PROFILE0,
  VP9PROFILE_PROFILE1,
  VP9PROFILE_PROFILE2,
  VP9PROFILE_PROFILE3,
  HEVCPROFILE_MAIN,
  HEVCPROFILE_MAIN10,
  HEVCPROFILE_MAIN_STILL_PICTURE,
  DOLBYVISION_PROFILE0,
  DOLBYVISION_PROFILE4,
  DOLBYVISION_PROFILE5,
  DOLBYVISION_PROFILE7,
  THEORAPROFILE_ANY,
  AV1PROFILE_PROFILE_MAIN,
  AV1PROFILE_PROFILE_HIGH,
  AV1PROFILE_PROFILE_PRO,
  DOLBYVISION_PROFILE8,
  DOLBYVISION_PROFILE9,
  VIDEO_CODEC_PROFILE_MAX = DOLBYVISION_PROFILE9,
};

// Specification of a decoding profile supported by a hardware decoder.
//
// Default member initializers are provided so that a default-constructed
// instance does not contain indeterminate values (previously |profile| and
// |encrypted_only| were left uninitialized). Aggregate initialization by
// existing callers is unaffected (C++14 and later).
struct GPU_EXPORT VideoDecodeAcceleratorSupportedProfile {
  // Codec profile this entry applies to, e.g. H264PROFILE_MAIN.
  VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
  // Maximum supported coded resolution.
  gfx::Size max_resolution;
  // Minimum supported coded resolution.
  gfx::Size min_resolution;
  // True if this profile is usable only for encrypted content.
  bool encrypted_only = false;
};

using VideoDecodeAcceleratorSupportedProfiles =
    std::vector<VideoDecodeAcceleratorSupportedProfile>;

// Capabilities of a hardware video decoder: the set of supported decode
// profiles plus implementation-specific capability flags.
struct GPU_EXPORT VideoDecodeAcceleratorCapabilities {
  // Special members are defined out-of-line (in gpu_info.cc).
  VideoDecodeAcceleratorCapabilities();
  VideoDecodeAcceleratorCapabilities(
      const VideoDecodeAcceleratorCapabilities& other);
  ~VideoDecodeAcceleratorCapabilities();
  // Decoding profiles supported by the accelerator.
  VideoDecodeAcceleratorSupportedProfiles supported_profiles;
  // Bitmask of capability flags. The meaning of individual bits is defined
  // by the media layer (presumably media::VideoDecodeAccelerator's
  // capabilities flags) — not visible from this header; confirm at the
  // point of use.
  uint32_t flags;
};

// Specification of an encoding profile supported by a hardware encoder.
//
// Default member initializers are provided so that a default-constructed
// instance does not contain indeterminate values (previously |profile| and
// the framerate fields were left uninitialized). Aggregate initialization
// by existing callers is unaffected (C++14 and later).
struct GPU_EXPORT VideoEncodeAcceleratorSupportedProfile {
  // Codec profile this entry applies to, e.g. VP8PROFILE_ANY.
  VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
  // Minimum and maximum supported input resolutions.
  gfx::Size min_resolution;
  gfx::Size max_resolution;
  // Maximum supported framerate, expressed as numerator / denominator.
  uint32_t max_framerate_numerator = 0;
  uint32_t max_framerate_denominator = 0;
};
using VideoEncodeAcceleratorSupportedProfiles =
    std::vector<VideoEncodeAcceleratorSupportedProfile>;

// Image codec handled by a hardware image decode accelerator.
// Explicit values plus kMaxValue suggest these may be recorded in metrics —
// treat them as append-only; confirm against the histograms enums before
// renumbering.
enum class ImageDecodeAcceleratorType {
  kUnknown = 0,
  kJpeg = 1,
  kWebP = 2,
  kMaxValue = kWebP,
};

// Chroma subsampling format (e.g. 4:2:0) supported by a hardware image
// decoder. Explicit values plus kMaxValue suggest these may be recorded in
// metrics — treat them as append-only.
enum class ImageDecodeAcceleratorSubsampling {
  k420 = 0,
  k422 = 1,
  k444 = 2,
  kMaxValue = k444,
};

// Specification of an image decoding profile supported by a hardware decoder.
struct GPU_EXPORT ImageDecodeAcceleratorSupportedProfile {
  // All five special members are defined out-of-line (in gpu_info.cc).
  ImageDecodeAcceleratorSupportedProfile();
  ImageDecodeAcceleratorSupportedProfile(
      const ImageDecodeAcceleratorSupportedProfile& other);
  ImageDecodeAcceleratorSupportedProfile(
      ImageDecodeAcceleratorSupportedProfile&& other);
  ~ImageDecodeAcceleratorSupportedProfile();
  ImageDecodeAcceleratorSupportedProfile& operator=(
      const ImageDecodeAcceleratorSupportedProfile& other);
  ImageDecodeAcceleratorSupportedProfile& operator=(
      ImageDecodeAcceleratorSupportedProfile&& other);

  // Fields common to all image types.
  // Type of image to which this profile applies, e.g., JPEG.
  ImageDecodeAcceleratorType image_type;
  // Minimum and maximum supported pixel dimensions of the encoded image.
  gfx::Size min_encoded_dimensions;
  gfx::Size max_encoded_dimensions;

  // Fields specific to |image_type| == kJpeg.
  // The supported chroma subsampling formats, e.g. 4:2:0.
  std::vector<ImageDecodeAcceleratorSubsampling> subsamplings;
};
using ImageDecodeAcceleratorSupportedProfiles =
    std::vector<ImageDecodeAcceleratorSupportedProfile>;

#if defined(OS_WIN)
// Level of hardware-overlay support for a given pixel format on Windows;
// see the per-format fields of OverlayInfo below.
enum class OverlaySupport {
  kNone = 0,
  kDirect = 1,
  kScaling = 2,
  kSoftware = 3
};

GPU_EXPORT const char* OverlaySupportToString(OverlaySupport support);

struct GPU_EXPORT OverlayInfo {
  OverlayInfo& operator=(const OverlayInfo& other) = default;
  bool operator==(const OverlayInfo& other) const {
    return direct_composition == other.direct_composition &&
           supports_overlays == other.supports_overlays &&
           yuy2_overlay_support == other.yuy2_overlay_support &&
           nv12_overlay_support == other.nv12_overlay_support &&
           bgra8_overlay_support == other.bgra8_overlay_support &&
           rgb10a2_overlay_support == other.rgb10a2_overlay_support;
  }
  bool operator!=(const OverlayInfo& other) const { return !(*this == other); }

  // True if we use direct composition surface on Windows.
  bool direct_composition = false;

  // True if we use direct composition surface overlays on Windows.
  bool supports_overlays = false;
  OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
  OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
  OverlaySupport bgra8_overlay_support = OverlaySupport::kNone;
  OverlaySupport rgb10a2_overlay_support = OverlaySupport::kNone;
};

#endif

#if defined(OS_MAC)
GPU_EXPORT bool ValidateMacOSSpecificTextureTarget(int target);
#endif  // OS_MAC

// Aggregated information about the system's GPU(s), graphics driver, GL
// strings and hardware accelerator capabilities (see the file-level comment
// above). Serialized for embedders via EnumerateFields/Enumerator below.
struct GPU_EXPORT GPUInfo {
  // Describes a single physical GPU (there may be several on dual-GPU
  // machines; see |gpu| and |secondary_gpus| below).
  struct GPU_EXPORT GPUDevice {
    // Special members are defined out-of-line (in gpu_info.cc).
    GPUDevice();
    GPUDevice(const GPUDevice& other);
    GPUDevice(GPUDevice&& other) noexcept;
    ~GPUDevice() noexcept;
    GPUDevice& operator=(const GPUDevice& other);
    GPUDevice& operator=(GPUDevice&& other) noexcept;

    // The DWORD (uint32_t) representing the graphics card vendor id.
    uint32_t vendor_id = 0u;

    // The DWORD (uint32_t) representing the graphics card device id.
    // Device ids are unique to vendor, not to one another.
    uint32_t device_id = 0u;

#if defined(OS_WIN)
    // The graphics card subsystem id.
    // The lower 16 bits represents the subsystem vendor id.
    uint32_t sub_sys_id = 0u;

    // The graphics card revision number.
    uint32_t revision = 0u;

    // The graphics card LUID. This is a unique identifier for the graphics card
    // that is guaranteed to be unique until the computer is restarted. The LUID
    // is used over the vendor id and device id because the device id is only
    // unique relative to its vendor, not to each other. If there are more than
    // one of the same exact graphics card, they all have the same vendor id and
    // device id but different LUIDs.
    LUID luid;
#endif  // OS_WIN

    // Whether this GPU is the currently used one.
    // Currently this field is only supported and meaningful on OS X and on
    // Windows using Angle with D3D11.
    bool active = false;

    // The strings that describe the GPU.
    // In Linux these strings are obtained through libpci.
    // In Win/MacOSX, these two strings are not filled at the moment.
    // In Android, these are respectively GL_VENDOR and GL_RENDERER.
    std::string vendor_string;
    std::string device_string;

    // Driver identification; origin is platform-dependent (not visible from
    // this header).
    std::string driver_vendor;
    std::string driver_version;

    // NVIDIA CUDA compute capability, major version. 0 if undetermined. Can be
    // used to determine the hardware generation that the GPU belongs to.
    int cuda_compute_capability_major = 0;
  };

  // Special members are defined out-of-line (in gpu_info.cc).
  GPUInfo();
  GPUInfo(const GPUInfo& other);
  ~GPUInfo();

  // The currently active gpu.
  GPUDevice& active_gpu();
  const GPUDevice& active_gpu() const;

  bool IsInitialized() const;

  // The amount of time taken to get from the process starting to the message
  // loop being pumped.
  base::TimeDelta initialization_time;

  // Computer has NVIDIA Optimus
  bool optimus;

  // Computer has AMD Dynamic Switchable Graphics
  bool amd_switchable;

  // Primary GPU, for example, the discrete GPU in a dual GPU machine.
  GPUDevice gpu;

  // Secondary GPUs, for example, the integrated GPU in a dual GPU machine.
  std::vector<GPUDevice> secondary_gpus;

  // The version of the pixel/fragment shader used by the gpu.
  std::string pixel_shader_version;

  // The version of the vertex shader used by the gpu.
  std::string vertex_shader_version;

  // The maximum multisampling sample count, either through ES3 or
  // EXT_multisampled_render_to_texture MSAA.
  std::string max_msaa_samples;

  // The machine model identifier. They can contain any character, including
  // whitespaces.  Currently it is supported on MacOSX and Android.
  // Android examples: "Nexus 5", "XT1032".
  // On MacOSX, the version is stripped out of the model identifier, for
  // example, the original identifier is "MacBookPro7,2", and we put
  // "MacBookPro" as machine_model_name, and "7.2" as machine_model_version.
  std::string machine_model_name;

  // The version of the machine model. Currently it is supported on MacOSX.
  // See machine_model_name's comment.
  std::string machine_model_version;

  // The GL_VERSION string.
  std::string gl_version;

  // The GL_VENDOR string.
  std::string gl_vendor;

  // The GL_RENDERER string.
  std::string gl_renderer;

  // The GL_EXTENSIONS string.
  std::string gl_extensions;

  // GL window system binding vendor.  "" if not available.
  std::string gl_ws_vendor;

  // GL window system binding version.  "" if not available.
  std::string gl_ws_version;

  // GL window system binding extensions.  "" if not available.
  std::string gl_ws_extensions;

  // GL reset notification strategy as defined by GL_ARB_robustness. 0 if GPU
  // reset detection or notification not available.
  uint32_t gl_reset_notification_strategy;

  // True if rendering is done in software (no usable hardware GPU).
  bool software_rendering;

  // Empty means unknown. Defined on X11 as
  // - "1" means indirect (versions can't be all zero)
  // - "2" means some type of direct rendering, but the version cannot be
  //    reliably determined
  // - "2.1", "2.2", "2.3" for DRI, DRI2, DRI3 respectively
  std::string direct_rendering_version;

  // Whether the gpu process is running in a sandbox.
  bool sandboxed;

  // True if the GPU is running in the browser process instead of its own.
  bool in_process_gpu;

  // True if the GPU process is using the passthrough command decoder.
  bool passthrough_cmd_decoder;

  // True only on android when extensions for threaded mailbox sharing are
  // present. Threaded mailbox sharing is used on Android only, so this check
  // is only implemented on Android.
  bool can_support_threaded_texture_mailbox = false;

#if defined(OS_MAC)
  // Enum describing which texture target is used for native GpuMemoryBuffers on
  // MacOS. Valid values are GL_TEXTURE_2D and GL_TEXTURE_RECTANGLE_ARB.
  uint32_t macos_specific_texture_target;
#endif  // OS_MAC

#if defined(OS_WIN)
  // The information returned by the DirectX Diagnostics Tool.
  DxDiagNode dx_diagnostics;

  // The supported d3d feature level in the gpu driver.
  uint32_t d3d12_feature_level = 0;

  // The supported Vulkan API version in the gpu driver.
  uint32_t vulkan_version = 0;

  // The GPU hardware overlay info.
  OverlayInfo overlay_info;
#endif

  // Hardware video decode/encode accelerator capabilities.
  VideoDecodeAcceleratorCapabilities video_decode_accelerator_capabilities;
  VideoEncodeAcceleratorSupportedProfiles
      video_encode_accelerator_supported_profiles;
  bool jpeg_decode_accelerator_supported;

  // Hardware image decode accelerator capabilities.
  ImageDecodeAcceleratorSupportedProfiles
      image_decode_accelerator_supported_profiles;

  // True if out-of-process rasterization is supported.
  bool oop_rasterization_supported;

  // True if fonts are rendered with subpixel anti-aliasing.
  bool subpixel_font_rendering;

#if BUILDFLAG(ENABLE_VULKAN)
  // Populated only when Vulkan support is compiled in and detected.
  base::Optional<VulkanInfo> vulkan_info;
#endif

  // Note: when adding new members, please remember to update EnumerateFields
  // in gpu_info.cc.

  // In conjunction with EnumerateFields, this allows the embedder to
  // enumerate the values in this structure without having to embed
  // references to its specific member variables. This simplifies the
  // addition of new fields to this type.
  class Enumerator {
   public:
    // The following methods apply to the "current" object. Initially this
    // is the root object, but calls to BeginGPUDevice/EndGPUDevice and
    // BeginAuxAttributes/EndAuxAttributes change the object to which these
    // calls should apply.
    virtual void AddInt64(const char* name, int64_t value) = 0;
    virtual void AddInt(const char* name, int value) = 0;
    virtual void AddString(const char* name, const std::string& value) = 0;
    virtual void AddBool(const char* name, bool value) = 0;
    virtual void AddTimeDeltaInSecondsF(const char* name,
                                        const base::TimeDelta& value) = 0;
    virtual void AddBinary(const char* name,
                           const base::span<const uint8_t>& blob) = 0;

    // Markers indicating that a GPUDevice is being described.
    virtual void BeginGPUDevice() = 0;
    virtual void EndGPUDevice() = 0;

    // Markers indicating that a VideoDecodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginVideoDecodeAcceleratorSupportedProfile() = 0;
    virtual void EndVideoDecodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that a VideoEncodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginVideoEncodeAcceleratorSupportedProfile() = 0;
    virtual void EndVideoEncodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that an ImageDecodeAcceleratorSupportedProfile is
    // being described.
    virtual void BeginImageDecodeAcceleratorSupportedProfile() = 0;
    virtual void EndImageDecodeAcceleratorSupportedProfile() = 0;

    // Markers indicating that "auxiliary" attributes of the GPUInfo
    // (according to the DevTools protocol) are being described.
    virtual void BeginAuxAttributes() = 0;
    virtual void EndAuxAttributes() = 0;

    // Markers indicating that an OverlayInfo is being described.
    virtual void BeginOverlayInfo() = 0;
    virtual void EndOverlayInfo() = 0;

   protected:
    virtual ~Enumerator() = default;
  };

  // Outputs the fields in this structure to the provided enumerator.
  void EnumerateFields(Enumerator* enumerator) const;
};

}  // namespace gpu

#endif  // GPU_CONFIG_GPU_INFO_H_