/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

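/*
 * A sketch of the mapping implemented above, assuming send_regs.base holds
 * the MMIO offset of SOFT_SCRATCH(0), as set up by intel_guc_init_send_regs()
 * below:
 *
 *	guc_send_reg(guc, 0) -> SOFT_SCRATCH(0)
 *	guc_send_reg(guc, i) -> SOFT_SCRATCH(i), each register 4 bytes apart
 */
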
void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
	BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
						guc_send_reg(guc, i),
						FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
	guc->notify = gen8_guc_raise_irq;
}

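/*
 * A sketch of the expected hand-off, assuming the usual bring-up flow in
 * intel_uc.c: the nop placeholders installed above are swapped for a real
 * transport once communication with GuC is enabled, e.g.:
 *
 *	guc->send = intel_guc_send_mmio;
 *	guc->handler = intel_guc_to_host_event_handler_mmio;
 *
 * Any send attempted before that point trips the WARN in intel_guc_send_nop().
 */
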
static int guc_init_wq(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/*
	 * The GuC log buffer flush work item has to do register access to
	 * send the ack to GuC, and this work item, if not synced before
	 * suspend, can potentially get executed after the GFX device is
	 * suspended.
	 * By marking the WQ as freezable, we don't have to bother with
	 * flushing this work item from the suspend hooks; the pending
	 * work item, if any, will either be executed before the suspend
	 * or scheduled later on resume. This way the handling of the work
	 * item can be kept the same between system suspend and rpm suspend.
	 */
	guc->log.relay.flush_wq =
		alloc_ordered_workqueue("i915-guc_log",
					WQ_HIGHPRI | WQ_FREEZABLE);
	if (!guc->log.relay.flush_wq) {
		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
		return -ENOMEM;
	}

	/*
	 * Even though sending a GuC action and adding a new workitem to the
	 * GuC workqueue are each serialized (each with its own locking), since
	 * we're using multiple engines, it's possible that we're going to
	 * issue a preempt request with two (or more - one for each different
	 * engine) workitems in the GuC queue. In this situation, GuC may
	 * submit all of them, which will make us very confused.
	 * Our preemption contexts may even already be complete - before we
	 * even had the chance to send the preempt action to GuC! Rather
	 * than introducing yet another lock, we can just use an ordered
	 * workqueue to make sure we're always sending a single preemption
	 * request with a single preemption workitem.
	 */
	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
	    USES_GUC_SUBMISSION(dev_priv)) {
		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
							  WQ_HIGHPRI);
		if (!guc->preempt_wq) {
			destroy_workqueue(guc->log.relay.flush_wq);
			DRM_ERROR("Couldn't allocate workqueue for GuC "
				  "preemption\n");
			return -ENOMEM;
		}
	}

	return 0;
}

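/*
 * A minimal sketch of the ordering guarantee relied on above (the work item
 * names are hypothetical): an ordered workqueue runs at most one work item
 * at a time, in queueing order, so two preemption requests can never be in
 * flight simultaneously:
 *
 *	queue_work(guc->preempt_wq, &preempt_work_a);
 *	queue_work(guc->preempt_wq, &preempt_work_b);
 *
 * preempt_work_b's function cannot start until preempt_work_a's has returned.
 */
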
static void guc_fini_wq(struct intel_guc *guc)
{
	struct workqueue_struct *wq;

	wq = fetch_and_zero(&guc->preempt_wq);
	if (wq)
		destroy_workqueue(wq);

	wq = fetch_and_zero(&guc->log.relay.flush_wq);
	if (wq)
		destroy_workqueue(wq);
}

int intel_guc_init_misc(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	int ret;

	ret = guc_init_wq(guc);
	if (ret)
		return ret;

	intel_uc_fw_fetch(i915, &guc->fw);

	return 0;
}

void intel_guc_fini_misc(struct intel_guc *guc)
{
	intel_uc_fw_cleanup_fetch(&guc->fw);
	guc_fini_wq(guc);
}

static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

int intel_guc_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fetch;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	if (HAS_GUC_CT(dev_priv)) {
		ret = intel_guc_ct_init(&guc->ct);
		if (ret)
			goto err_ads;
	}

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	return 0;

err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	i915_ggtt_disable_guc(dev_priv);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags;
	u32 ads;

	ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

	if (!GUC_LOG_LEVEL_IS_ENABLED(level))
		flags |= GUC_LOG_DEFAULT_DISABLED;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

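/*
 * A worked example of the packing above (the log level is hypothetical):
 * for a verbose level that maps to verbosity 2, neither
 * GUC_LOG_DEFAULT_DISABLED nor GUC_LOG_DISABLED is set, so the result is:
 *
 *	flags == (ads << GUC_ADS_ADDR_SHIFT) | GUC_ADS_ENABLED |
 *		 (2 << GUC_LOG_VERBOSITY_SHIFT)
 */
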
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	flags |= GUC_CTL_VCS2_ENABLED;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
		flags |= GUC_CTL_KERNEL_SUBMISSIONS;
	else
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

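/*
 * A worked example of the size encoding above (the buffer size is
 * hypothetical): with CRASH_BUFFER_SIZE == SZ_2M, UNIT is SZ_1M and FLAG is
 * GUC_LOG_ALLOC_IN_MEGABYTE, so the crash field carries
 * SZ_2M / SZ_1M - 1 == 1, which GuC decodes back as (1 + 1) megabytes.
 */
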
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}

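/*
 * A sketch of the resulting SOFT_SCRATCH layout, as written above:
 *
 *	SOFT_SCRATCH(0)     = 0		(also the first send register, see
 *					 intel_guc_init_send_regs())
 *	SOFT_SCRATCH(1 + i) = params[i]	for i in [0, GUC_CTL_MAX_DWORDS)
 */
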
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}

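/*
 * A minimal usage sketch: callers go through the intel_guc_send() wrapper,
 * which dispatches to this function while MMIO is the active transport, e.g.
 * the two-dword action built in intel_guc_sample_forcewake() below:
 *
 *	u32 action[] = { INTEL_GUC_ACTION_SAMPLE_FORCEWAKE, 0 };
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */
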
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 msg, val;

	/*
	 * Sample the log buffer flush related bits and clear them out of
	 * the message identity register straight away, to minimize the
	 * probability of losing a flush interrupt when there are back
	 * to back flush interrupts.
	 * There can be a new flush interrupt, for a different log buffer
	 * type (like for ISR), whilst Host is handling one (for DPC).
	 * Since the same bit is used in the message register for ISR & DPC,
	 * it could happen that GuC sets the bit for the 2nd interrupt but
	 * Host clears out the bit on handling the 1st interrupt.
	 */
	disable_rpm_wakeref_asserts(dev_priv);
	spin_lock(&guc->irq_lock);
	val = I915_READ(SOFT_SCRATCH(15));
	msg = val & guc->msg_enabled_mask;
	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
	spin_unlock(&guc->irq_lock);
	enable_rpm_wakeref_asserts(dev_priv);

	intel_guc_to_host_process_recv_msg(guc, &msg, 1);
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW
 * and then return, so waiting on the H2G is not enough to guarantee GuC is
 * done. When all the processing is done, GuC writes
 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll on
 * that. Note that GuC does not ensure that the value in the register is
 * different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in
 * progress so we need to take care of that ourselves as well.
 */
static int guc_sleep_state_action(struct intel_guc *guc,
				  const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;
	u32 status;

	I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, len);
	if (ret)
		return ret;

	ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 data[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
		intel_guc_ggtt_offset(guc, guc->shared_data)
	};

	return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC
 * WOPCM while the upper part of GuC Address Space [ggtt_pin_bias,
 * GUC_GGTT_TOP) is mapped to DRAM. The value of the GuC ggtt_pin_bias is the
 * GuC WOPCM size.
 */

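/*
 * A worked example of the bias (the WOPCM size is hypothetical): with a
 * 1 MiB GuC WOPCM, ggtt_pin_bias is 1 MiB, so objects intended for GuC use
 * must be pinned at GGTT offsets in [0x0010_0000, GUC_GGTT_TOP); see
 * intel_guc_allocate_vma() below.
 */
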
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_reserved_gtt_size()
 * @guc: intel_guc structure
 *
 * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are
 * using GuC we can't have any objects pinned in that region. This function
 * returns the size of the shadowed region.
 *
 * Returns:
 * 0 if GuC is not present or not in use.
 * Otherwise, the GuC WOPCM size.
 */
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
	return guc_to_i915(guc)->wopcm.guc.size;
}

int intel_guc_reserve_ggtt_top(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 size;
	int ret;

	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		DRM_DEBUG_DRIVER("GuC: failed to reserve top of ggtt\n");

	return ret;
}

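/*
 * A worked example of the reservation above (the GGTT size is hypothetical):
 * with a 4 GiB GGTT, size == ggtt->vm.total - GUC_GGTT_TOP and the node
 * covers [GUC_GGTT_TOP, 4 GiB), keeping the range that GuC cannot address
 * free of any other pinned objects.
 */
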
void intel_guc_release_ggtt_top(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct i915_ggtt *ggtt = &i915->ggtt;

	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}