if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
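The #if change above matters because struct mutex only carries an ->owner
field when CONFIG_DEBUG_MUTEXES or CONFIG_MUTEX_SPIN_ON_OWNER is set; the
field's own guard in the mutex headers is exactly that pair, so testing
CONFIG_SMP was not a safe proxy and could reference a field that does not
exist. For reference, a sketch of the whole helper as it reads with the
change applied (the tail below is reconstructed from context, not part of
the hunk):

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	/* ->owner is only compiled into struct mutex under these configs */
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}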
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
unsigned long count;
bool unlock;
if (!i915_gem_shrinker_lock(dev, &unlock))
return 0;
+ i915_gem_retire_requests(dev_priv);
+
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (can_release_pages(obj))
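Two things change in the count callback. First, the dev_priv->dev
backpointer becomes &dev_priv->drm: struct drm_device is now embedded in
drm_i915_private rather than pointed at. Second, i915_gem_retire_requests()
runs before the walk so that pages pinned only by already-completed requests
are released and show up as reclaimable; the loop body cut off by the hunk
accumulates each releasable object's page count. A minimal sketch of the
embedding that the &dev_priv->drm conversion relies on (illustrative layout,
not the full kernel definition):

struct drm_i915_private {
	struct drm_device drm;	/* embedded; replaces the old *dev backpointer */
	/* ... many more members ... */
};

/* so any site that held dev_priv->dev can take the member's address: */
struct drm_device *dev = &dev_priv->drm;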
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
unsigned long freed;
bool unlock;
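The scan callback gets the same backpointer conversion. Both callbacks
recover their drm_i915_private from the struct shrinker the core passes in
via container_of(), which maps a pointer to an embedded member back to its
enclosing structure. A generic, self-contained sketch of that pattern
(hypothetical inner/outer types, not the i915 ones):

#include <stddef.h>

struct inner { int x; };

struct outer {
	int tag;
	struct inner member;	/* embedded, like mm.shrinker above */
};

/* same idea as the kernel's container_of(): subtract the member's
 * offset from the member pointer to find the enclosing object */
#define container_of(ptr, type, field) \
	((type *)((char *)(ptr) - offsetof(type, field)))

static struct outer *outer_from_inner(struct inner *p)
{
	return container_of(p, struct outer, member);
}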
{
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
- while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+ while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
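The uninterruptible-lock helper polls for struct_mutex rather than blocking
on it: each failed attempt sleeps for one jiffy in a killable state, and a
pending fatal signal aborts the wait. The hunk cuts off before the timeout
handling; presumably the rest of the loop counts down the jiffies budget
computed above and gives up when it expires, along these lines (a hedged
sketch, not the hunk's actual tail):

	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;
		if (--timeout == 0)	/* assumed: budget from msecs_to_jiffies() */
			return false;
	}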
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
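The unlock side restores the saved mm.interruptible flag and drops
struct_mutex only if the lock helper actually took it (unlock is false when
the mutex was already held by this task). A sketch of how the pair brackets
a critical section in a notifier, assuming a shrinker_lock_uninterruptible
struct and a 5000 ms budget purely for illustration:

	struct shrinker_lock_uninterruptible slu;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	/* ... reclaim work under struct_mutex ... */

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);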
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gpu_idle(dev_priv->dev);
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
goto out;
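The last hunk is an interface conversion in the same spirit: the idle wait
now takes the drm_i915_private directly, so callers no longer bounce through
a drm_device. Shown as hypothetical prototypes, the change amounts to:

int i915_gpu_idle(struct drm_device *dev);			/* old */
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);	/* new */

Either way, the call still forces outstanding GPU work to complete so that
everything lands on the inactive lists before being purged.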