diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b4771adb8ba..b6b175aa5d25 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20170717"
-#define DRIVER_TIMESTAMP	1500275179
+#define DRIVER_DATE		"20170818"
+#define DRIVER_TIMESTAMP	1503088845
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -602,7 +602,7 @@ struct drm_i915_file_private {
 	 * to limit the badly behaving clients access to gpu.
 	 */
 #define I915_MAX_CLIENT_CONTEXT_BANS 3
-	int context_bans;
+	atomic_t context_bans;
 };
 
 /* Used by dp and fdi links */
@@ -646,6 +646,7 @@ struct intel_opregion {
 	u32 swsci_sbcb_sub_functions;
 	struct opregion_asle *asle;
 	void *rvda;
+	void *vbt_firmware;
 	const void *vbt;
 	u32 vbt_size;
 	u32 *lid_state;
@@ -714,11 +715,6 @@ struct drm_i915_display_funcs {
 	void (*fdi_link_train)(struct intel_crtc *crtc,
 			       const struct intel_crtc_state *crtc_state);
 	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
-	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_request *req,
-			  uint32_t flags);
 	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
@@ -1062,6 +1058,11 @@ struct intel_fbc {
 	bool underrun_detected;
 	struct work_struct underrun_work;
 
+	/*
+	 * Due to the atomic rules we can't access some structures without the
+	 * appropriate locking, so we cache information here in order to avoid
+	 * these problems.
+	 */
 	struct intel_fbc_state_cache {
 		struct i915_vma *vma;
 
@@ -1083,6 +1084,13 @@ struct intel_fbc {
 		} fb;
 	} state_cache;
 
+	/*
+	 * This structure contains everything that's relevant to program the
+	 * hardware registers. When we want to figure out if we need to disable
+	 * and re-enable FBC for a new configuration we just check if there's
+	 * something different in the struct. The genx_fbc_activate functions
+	 * are supposed to read from it in order to program the registers.
+	 */
 	struct intel_fbc_reg_params {
 		struct i915_vma *vma;
 
@@ -1158,8 +1166,8 @@ enum intel_pch {
 	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
 	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
 	PCH_SPT,        /* Sunrisepoint PCH */
-	PCH_KBP,        /* Kabypoint PCH */
-	PCH_CNP,        /* Cannonpoint PCH */
+	PCH_KBP,        /* Kaby Lake PCH */
+	PCH_CNP,        /* Cannon Lake PCH */
 	PCH_NOP,
 };
 
@@ -1387,12 +1395,23 @@ struct i915_power_well {
 	bool hw_enabled;
 	u64 domains;
 	/* unique identifier for this power well */
-	unsigned long id;
+	enum i915_power_well_id id;
 	/*
 	 * Arbitraty data associated with this power well. Platform and power
 	 * well specific.
 	 */
-	unsigned long data;
+	union {
+		struct {
+			enum dpio_phy phy;
+		} bxt;
+		struct {
+			/* Mask of pipes whose IRQ logic is backed by the pw */
+			u8 irq_pipe_mask;
+			/* The pw is backing the VGA functionality */
+			bool has_vga:1;
+			bool has_fuses:1;
+		} hsw;
+	};
 	const struct i915_power_well_ops *ops;
 };
 
@@ -1509,6 +1528,8 @@ struct i915_gpu_error {
 	/* Protected by the above dev->gpu_error.lock. */
 	struct i915_gpu_state *first_error;
 
+	atomic_t pending_fb_pin;
+
 	unsigned long missed_irq_rings;
 
 	/**
@@ -1568,6 +1589,7 @@ struct i915_gpu_error {
 	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
+#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
 
@@ -1883,6 +1905,7 @@ struct i915_workarounds {
 
 struct i915_virtual_gpu {
 	bool active;
+	u32 caps;
 };
 
 /* used in computing the new watermarks state */
@@ -1902,6 +1925,24 @@ struct i915_oa_reg {
 	u32 value;
 };
 
+struct i915_oa_config {
+	char uuid[UUID_STRING_LEN + 1];
+	int id;
+
+	const struct i915_oa_reg *mux_regs;
+	u32 mux_regs_len;
+	const struct i915_oa_reg *b_counter_regs;
+	u32 b_counter_regs_len;
+	const struct i915_oa_reg *flex_regs;
+	u32 flex_regs_len;
+
+	struct attribute_group sysfs_metric;
+	struct attribute *attrs[2];
+	struct device_attribute sysfs_metric_id;
+
+	atomic_t ref_count;
+};
+
 struct i915_perf_stream;
 
 /**
@@ -2014,12 +2055,36 @@ struct i915_perf_stream {
 	 * type of configured stream.
 	 */
 	const struct i915_perf_stream_ops *ops;
+
+	/**
+	 * @oa_config: The OA configuration used by the stream.
+	 */
+	struct i915_oa_config *oa_config;
 };
 
 /**
  * struct i915_oa_ops - Gen specific implementation of an OA unit stream
  */
 struct i915_oa_ops {
+	/**
+	 * @is_valid_b_counter_reg: Validates register's address for
+	 * programming boolean counters for a particular platform.
+	 */
+	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
+				       u32 addr);
+
+	/**
+	 * @is_valid_mux_reg: Validates register's address for programming mux
+	 * for a particular platform.
+	 */
+	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+	/**
+	 * @is_valid_flex_reg: Validates register's address for programming
+	 * flex EU filtering for a particular platform.
+	 */
+	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
 	/**
 	 * @init_oa_buffer: Resets the head and tail pointers of the
 	 * circular buffer for periodic OA reports.
@@ -2037,21 +2102,14 @@ struct i915_oa_ops {
 	 */
 	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
 
-	/**
-	 * @select_metric_set: The auto generated code that checks whether a
-	 * requested OA config is applicable to the system and if so sets up
-	 * the mux, oa and flex eu register config pointers according to the
-	 * current dev_priv->perf.oa.metrics_set.
-	 */
-	int (*select_metric_set)(struct drm_i915_private *dev_priv);
-
 	/**
 	 * @enable_metric_set: Selects and applies any MUX configuration to set
 	 * up the Boolean and Custom (B/C) counters that are part of the
 	 * counter reports being sampled. May apply system constraints such as
 	 * disabling EU clock gating as required.
 	 */
-	int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+	int (*enable_metric_set)(struct drm_i915_private *dev_priv,
+				 const struct i915_oa_config *oa_config);
 
 	/**
 	 * @disable_metric_set: Remove system constraints associated with using
@@ -2097,6 +2155,7 @@ struct drm_i915_private {
 	struct kmem_cache *objects;
 	struct kmem_cache *vmas;
+	struct kmem_cache *luts;
 	struct kmem_cache *requests;
 	struct kmem_cache *dependencies;
 	struct kmem_cache *priorities;
 
@@ -2147,9 +2206,6 @@ struct drm_i915_private {
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
-	/* protects the mmio flip data */
-	spinlock_t mmio_flip_lock;
-
 	bool display_irqs_enabled;
 
 	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -2254,7 +2310,6 @@ struct drm_i915_private {
 
 	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
 	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-	wait_queue_head_t pending_flip_queue;
 
#ifdef CONFIG_DEBUG_FS
 	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
@@ -2415,10 +2470,32 @@ struct drm_i915_private {
 		struct kobject *metrics_kobj;
 		struct ctl_table_header *sysctl_header;
 
+		/*
+		 * Lock associated with adding/modifying/removing OA configs
+		 * in dev_priv->perf.metrics_idr.
+		 */
+		struct mutex metrics_lock;
+
+		/*
+		 * List of dynamic configurations; you need to hold
+		 * dev_priv->perf.metrics_lock to access it.
+		 */
+		struct idr metrics_idr;
+
+		/*
+		 * Lock associated with anything below within this structure
+		 * except exclusive_stream.
+		 */
 		struct mutex lock;
 		struct list_head streams;
 
 		struct {
+			/*
+			 * The stream currently using the OA unit. If accessed
+			 * outside a syscall associated with its file
+			 * descriptor, you need to hold
+			 * dev_priv->drm.struct_mutex.
+			 */
 			struct i915_perf_stream *exclusive_stream;
 
 			u32 specific_ctx_id;
@@ -2437,16 +2514,7 @@ struct drm_i915_private {
 			int period_exponent;
 			int timestamp_frequency;
 
-			int metrics_set;
-
-			const struct i915_oa_reg *mux_regs[6];
-			int mux_regs_lens[6];
-			int n_mux_configs;
-
-			const struct i915_oa_reg *b_counter_regs;
-			int b_counter_regs_len;
-			const struct i915_oa_reg *flex_regs;
-			int flex_regs_len;
+			struct i915_oa_config test_config;
 
 			struct {
 				struct i915_vma *vma;
@@ -2533,7 +2601,6 @@ struct drm_i915_private {
 
 			struct i915_oa_ops ops;
 			const struct i915_oa_format *oa_formats;
-			int n_builtin_sets;
 		} oa;
 	} perf;
 
@@ -3107,8 +3174,12 @@ extern int i915_driver_load(struct pci_dev *pdev,
 extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern void i915_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset_engine(struct intel_engine_cs *engine);
+
+#define I915_RESET_QUIET BIT(0)
+extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
+extern int i915_reset_engine(struct intel_engine_cs *engine,
+			     unsigned int flags);
+
 extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
@@ -3128,7 +3199,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+enum port intel_hpd_pin_to_port(enum hpd_pin pin);
+enum hpd_pin intel_hpd_pin(enum port port);
 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 
@@ -3297,6 +3369,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 	} while (flush_work(&i915->mm.free_work));
 }
 
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+	/*
+	 * Similar to objects above (see i915_gem_drain_freed_objects()), in
+	 * general we have workers that are armed by RCU and then rearm
+	 * themselves in their callbacks. To be paranoid, we need to
+	 * drain the workqueue a second time after waiting for the RCU
+	 * grace period so that we catch work queued via RCU from the first
+	 * pass. As neither drain_workqueue() nor flush_workqueue() report
+	 * a result, we assume that no more than 2 passes are needed to
+	 * catch all recursive RCU delayed work.
+	 *
+	 */
+	int pass = 2;
+	do {
+		rcu_barrier();
+		drain_workqueue(i915->wq);
+	} while (--pass);
+}
+
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
@@ -3386,6 +3478,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 enum i915_map_type {
 	I915_MAP_WB = 0,
 	I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
 /**
@@ -3594,6 +3689,10 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
 
 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 			    struct i915_gem_context *ctx,
 			    uint32_t *reg_state);
@@ -3645,6 +3744,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
 /* i915_gem_shrinker.c */
 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 			      unsigned long target,
+			      unsigned long *nr_scanned,
 			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
@@ -4081,6 +4181,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
 
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 {
+	/* nsecs_to_jiffies64() does not guard against overflow */
+	if (NSEC_PER_SEC % HZ &&
+	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+		return MAX_JIFFY_OFFSET;
+
 	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
 }
 
@@ -4227,10 +4332,11 @@ int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
 		     struct io_mapping *iomap);
 
-static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+static inline bool
+intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
-	return (obj->cache_level != I915_CACHE_NONE ||
-		HAS_LLC(to_i915(obj->base.dev)));
+	return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
+					      engine->class);
 }
 
 #endif
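
A few of the changes above benefit from a short illustration. First, the new I915_MAP_FORCE_WB/I915_MAP_FORCE_WC values: rather than adding a parameter to the pin_map interface, the patch encodes a "replace any existing mapping" request in the high bit of the existing enum. Below is a minimal standalone C sketch of that encoding; the two helper functions are hypothetical names for illustration only, not functions added by this patch.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/* Hypothetical helpers: split a request into the base caching mode
 * (write-back vs write-combining) and the override bit. */
static bool map_type_is_forced(unsigned int type)
{
	return type & I915_MAP_OVERRIDE;
}

static enum i915_map_type map_type_base(unsigned int type)
{
	return (enum i915_map_type)(type & ~I915_MAP_OVERRIDE);
}

int main(void)
{
	/* A FORCE value asks for the given mapping type even when a
	 * mapping of a different type has already been cached. */
	printf("forced=%d base=%d\n",
	       map_type_is_forced(I915_MAP_FORCE_WC),
	       map_type_base(I915_MAP_FORCE_WC));	/* forced=1 base=1 */
	return 0;
}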
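Second, the overflow guard added to nsecs_to_jiffies_timeout(). When HZ does not divide NSEC_PER_SEC, nsecs_to_jiffies64() scales n with a multiplication that can wrap for very large n, so the result is clamped before the conversion is attempted. The following standalone sketch models the check for a 32-bit kernel built with CONFIG_HZ_300 (a configuration where HZ does not divide NSEC_PER_SEC and the clamp can actually fire); the final conversion is a simplified stand-in, not the kernel's nsecs_to_jiffies64().

#include <stdint.h>
#include <stdio.h>

#define HZ		300ULL
#define NSEC_PER_SEC	1000000000ULL
/* MAX_JIFFY_OFFSET with a 32-bit long, i.e. ((LONG_MAX >> 1) - 1) */
#define MAX_JIFFY_OFFSET ((0x7fffffffUL >> 1) - 1)

static unsigned long nsecs_to_jiffies_timeout(uint64_t n)
{
	/* The guard from the hunk above: clamp before the scaled
	 * conversion can overflow (div_u64() is a 64-bit divide). */
	if (NSEC_PER_SEC % HZ &&
	    n / NSEC_PER_SEC >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	/* Simplified stand-in for nsecs_to_jiffies64(n) + 1. */
	return (unsigned long)(n * HZ / NSEC_PER_SEC) + 1;
}

int main(void)
{
	printf("%lu\n", nsecs_to_jiffies_timeout(UINT64_MAX));   /* clamped */
	printf("%lu\n", nsecs_to_jiffies_timeout(NSEC_PER_SEC)); /* 301 */
	return 0;
}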
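Finally, the atomic_t ref_count in the new struct i915_oa_config. A dynamic OA configuration registered through i915_perf_add_config_ioctl() can still be in use by an open perf stream (stream->oa_config) when userspace removes it, which suggests its lifetime is reference counted rather than tied to the ioctl. The sketch below shows that get/put pattern in userspace C11 atomics standing in for the kernel's atomic_t; the helper names and the exact set of reference holders are assumptions for illustration, not taken from this patch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct i915_oa_config {
	char uuid[36 + 1];	/* UUID_STRING_LEN + 1 */
	atomic_int ref_count;
	/* register lists and sysfs attributes elided */
};

/* Hypothetical helpers mirroring the usual kernel get/put idiom. */
static struct i915_oa_config *oa_config_get(struct i915_oa_config *cfg)
{
	atomic_fetch_add(&cfg->ref_count, 1);
	return cfg;
}

static void oa_config_put(struct i915_oa_config *cfg)
{
	/* Free only when the last holder drops its reference. */
	if (atomic_fetch_sub(&cfg->ref_count, 1) == 1)
		free(cfg);
}

int main(void)
{
	struct i915_oa_config *cfg = calloc(1, sizeof(*cfg));

	if (!cfg)
		return 1;
	atomic_init(&cfg->ref_count, 1);	/* creator's reference */
	strcpy(cfg->uuid, "01234567-89ab-cdef-0123-456789abcdef");

	oa_config_get(cfg);	/* e.g. an open stream starts using it */
	oa_config_put(cfg);	/* userspace removes the config */
	oa_config_put(cfg);	/* the stream closes: freed here */

	puts("done");
	return 0;
}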