#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170717"
-#define DRIVER_TIMESTAMP 1500275179
+#define DRIVER_DATE "20170818"
+#define DRIVER_TIMESTAMP 1503088845
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to catch unexpected conditions
 * and to limit a badly behaving client's access to the GPU.
 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
- int context_bans;
+ atomic_t context_bans;
};
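/*
 * Illustrative sketch, not part of this patch: with context_bans now an
 * atomic_t, the ban-limit check no longer needs struct_mutex. The helper
 * name below is hypothetical.
 */
static inline bool example_client_is_banned(struct drm_i915_file_private *fpriv)
{
	return atomic_read(&fpriv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}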
/* Used by dp and fdi links */
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
void *rvda;
+ void *vbt_firmware;
const void *vbt;
u32 vbt_size;
u32 *lid_state;
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags);
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
bool underrun_detected;
struct work_struct underrun_work;
+ /*
+ * Due to the atomic modeset locking rules, some structures can't be
+ * accessed from every context, so we cache the information we need
+ * here to avoid taking those locks.
+ */
struct intel_fbc_state_cache {
struct i915_vma *vma;
} fb;
} state_cache;
+ /*
+ * This structure contains everything that's relevant to program the
+ * hardware registers. When we want to figure out if we need to disable
+ * and re-enable FBC for a new configuration, we just check whether
+ * anything in the struct differs (see the comparison sketch below). The
+ * genx_fbc_activate functions are supposed to read from it in order to
+ * program the registers.
+ */
struct intel_fbc_reg_params {
struct i915_vma *vma;
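/*
 * Illustrative sketch, not part of this patch: the "check if anything
 * differs" comparison described above can be a single memcmp(), assuming
 * the params are built with memset() so padding compares equal. The
 * helper name is hypothetical.
 */
static bool example_fbc_reg_params_equal(const struct intel_fbc_reg_params *a,
					 const struct intel_fbc_reg_params *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}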
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
- PCH_KBP, /* Kabypoint PCH */
- PCH_CNP, /* Cannonpoint PCH */
+ PCH_KBP, /* Kaby Lake PCH */
+ PCH_CNP, /* Cannon Lake PCH */
PCH_NOP,
};
bool hw_enabled;
u64 domains;
/* unique identifier for this power well */
- unsigned long id;
+ enum i915_power_well_id id;
/*
 * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
- unsigned long data;
+ union {
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ } hsw;
+ };
const struct i915_power_well_ops *ops;
};
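/*
 * Illustrative sketch, not part of this patch: with the typed union,
 * platform code reads its own member instead of decoding a bare
 * "unsigned long data". Helper name hypothetical.
 */
static u8 example_pw_backed_irq_pipes(const struct i915_power_well *power_well)
{
	/* Only meaningful for power wells that use the hsw union member. */
	return power_well->hsw.irq_pipe_mask;
}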
/* Protected by the above dev->gpu_error.lock. */
struct i915_gpu_state *first_error;
+ atomic_t pending_fb_pin;
+
unsigned long missed_irq_rings;
/**
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
+#define I915_RESET_MODESET 2
#define I915_WEDGED (BITS_PER_LONG - 1)
#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
struct i915_virtual_gpu {
bool active;
+ u32 caps;
};
/* used in computing the new watermarks state */
u32 value;
};
+struct i915_oa_config {
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ atomic_t ref_count;
+};
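/*
 * Illustrative sketch, not part of this patch: the atomic ref_count
 * suggests get/put-style lifetime management for dynamic configs,
 * roughly as below (helper name hypothetical, free path elided).
 */
static void example_put_oa_config(struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;
	/* Last reference dropped: free the register lists and the config. */
}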
+
struct i915_perf_stream;
/**
* type of configured stream.
*/
const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
};
/**
* struct i915_oa_ops - Gen specific implementation of an OA unit stream
*/
struct i915_oa_ops {
+ /**
+ * @is_valid_b_counter_reg: Validates a register address for
+ * programming boolean counters on a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
+ u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates a register address for programming
+ * the mux on a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates a register address for programming
+ * flex EU filtering on a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
/**
* @init_oa_buffer: Resets the head and tail pointers of the
* circular buffer for periodic OA reports.
*/
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
- /**
- * @select_metric_set: The auto generated code that checks whether a
- * requested OA config is applicable to the system and if so sets up
- * the mux, oa and flex eu register config pointers according to the
- * current dev_priv->perf.oa.metrics_set.
- */
- int (*select_metric_set)(struct drm_i915_private *dev_priv);
-
/**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+ int (*enable_metric_set)(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config);
/**
* @disable_metric_set: Remove system constraints associated with using
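/*
 * Illustrative sketches, not part of this patch. A platform
 * implementation of the @is_valid_*_reg hooks is essentially an
 * address-range whitelist; the range below is made up:
 */
static bool example_is_valid_b_counter_addr(struct drm_i915_private *dev_priv,
					    u32 addr)
{
	return addr >= 0x2700 && addr <= 0x2fff; /* hypothetical range */
}

/*
 * And since @enable_metric_set now takes the config explicitly, a stream
 * enable path would pass its own oa_config instead of relying on a global
 * metrics_set (wrapper name hypothetical):
 */
static int example_stream_enable(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						       stream->oa_config);
}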
struct kmem_cache *objects;
struct kmem_cache *vmas;
+ struct kmem_cache *luts;
struct kmem_cache *requests;
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
/* protects the irq masks */
spinlock_t irq_lock;
- /* protects the mmio flip data */
- spinlock_t mmio_flip_lock;
-
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
- wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
struct kobject *metrics_kobj;
struct ctl_table_header *sysctl_header;
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in dev_priv->perf.metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations; you need to hold
+ * dev_priv->perf.metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock protecting everything below in this structure, except
+ * exclusive_stream.
+ */
struct mutex lock;
struct list_head streams;
struct {
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, you need to hold
+ * dev_priv->drm.struct_mutex.
+ */
struct i915_perf_stream *exclusive_stream;
u32 specific_ctx_id;
int period_exponent;
int timestamp_frequency;
- int metrics_set;
-
- const struct i915_oa_reg *mux_regs[6];
- int mux_regs_lens[6];
- int n_mux_configs;
-
- const struct i915_oa_reg *b_counter_regs;
- int b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- int flex_regs_len;
+ struct i915_oa_config test_config;
struct {
struct i915_vma *vma;
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;
- int n_builtin_sets;
} oa;
} perf;
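/*
 * Illustrative sketch, not part of this patch: a lookup in metrics_idr
 * takes metrics_lock and grabs a reference while the lock is held
 * (helper name hypothetical):
 */
static struct i915_oa_config *
example_get_oa_config(struct drm_i915_private *dev_priv, int metrics_set)
{
	struct i915_oa_config *oa_config;

	mutex_lock(&dev_priv->perf.metrics_lock);
	oa_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (oa_config)
		atomic_inc(&oa_config->ref_count);
	mutex_unlock(&dev_priv->perf.metrics_lock);

	return oa_config;
}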
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern void i915_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset_engine(struct intel_engine_cs *engine);
+
+#define I915_RESET_QUIET BIT(0)
+extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
+extern int i915_reset_engine(struct intel_engine_cs *engine,
+ unsigned int flags);
+
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
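/*
 * Illustrative sketch, not part of this patch: assuming I915_RESET_QUIET
 * suppresses the user-visible reset notice, a caller that wants a silent
 * reset would pass it as the flags argument (wrapper name hypothetical):
 */
static void example_quiet_reset(struct drm_i915_private *i915)
{
	i915_reset(i915, I915_RESET_QUIET);
}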
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+enum port intel_hpd_pin_to_port(enum hpd_pin pin);
+enum hpd_pin intel_hpd_pin(enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
} while (flush_work(&i915->mm.free_work));
}
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ /*
+ * Similar to objects above (see i915_gem_drain_freed_objects), in
+ * general we have workers that are armed by RCU and then rearm
+ * themselves in their callbacks. To be paranoid, we need to
+ * drain the workqueue a second time after waiting for the RCU
+ * grace period so that we catch work queued via RCU from the first
+ * pass. As neither drain_workqueue() nor flush_workqueue() report
+ * a result, we assume that no more than two passes are required
+ * to catch all recursive RCU-delayed work.
+ */
+ int pass = 2;
+ do {
+ rcu_barrier();
+ drain_workqueue(i915->wq);
+ } while (--pass);
+}
+
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
enum i915_map_type {
I915_MAP_WB = 0,
I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
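/*
 * Illustrative sketch, not part of this patch: the FORCE variants set the
 * override bit on top of a base mapping type, so code recovers the base
 * type by masking it off (helper name hypothetical):
 */
static enum i915_map_type example_base_map_type(enum i915_map_type type)
{
	return type & ~I915_MAP_OVERRIDE;
}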
/**
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
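/*
 * Illustrative sketch, not part of this patch: the new perf-config ioctls
 * would be wired into the driver's ioctl table roughly as below (the
 * permission flags are an assumption):
 */
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl,
		  DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl,
		  DRM_RENDER_ALLOW),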
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
uint32_t *reg_state);
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
+ unsigned long *nr_scanned,
unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
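/*
 * Illustrative sketch, not part of this patch: the new nr_scanned
 * out-parameter lets callers report scan effort separately from pages
 * actually freed (wrapper name hypothetical):
 */
static unsigned long example_shrink_unbound(struct drm_i915_private *i915,
					    unsigned long target)
{
	unsigned long nr_scanned = 0;
	unsigned long freed;

	freed = i915_gem_shrink(i915, target, &nr_scanned,
				I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE);
	return nr_scanned ? freed : 0; /* no progress if nothing was scanned */
}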
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
+ /* nsecs_to_jiffies64() does not guard against overflow */
+ if (NSEC_PER_SEC % HZ &&
+ div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+ return MAX_JIFFY_OFFSET;
+
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+static inline bool
+intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
- return (obj->cache_level != I915_CACHE_NONE ||
- HAS_LLC(to_i915(obj->base.dev)));
+ return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
+ engine->class);
}
#endif