Merge tag 'drm-fixes-2025-03-14' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "Regular weekly fixes pull, the usual leaders in amdgpu/xe, a couple
  of i915, and some scattered misc fixes.

  panic:
   - two clippy fixes

  dp_mst:
   - locking fix

  atomic:
   - fix redundant DPMS calls

  i915:
   - Do cdclk post plane programming later
   - Bump MMAP_GTT_VERSION: missing indication of partial mmaps support

  xe:
   - Release guc ids before cancelling work
   - Fix new warnings around userptr
   - Temporarily disable D3Cold on BMG
   - Retry and wait longer for GuC PC to start
   - Remove redundant check in xe_vm_create_ioctl

  amdgpu:
   - GC 12.x DCC fix
   - DC DCE 6.x fix
   - Hibernation fix
   - HPD fix
   - Backlight fixes
   - Color depth fix
   - UAF fix in hdcp_work
   - VCE 2.x fix
   - GC 12.x PTE fix

  amdkfd:
   - Queue eviction fix

  gma500:
   - fix NULL pointer check"

* tag 'drm-fixes-2025-03-14' of https://gitlab.freedesktop.org/drm/kernel: (23 commits)
  drm/amdgpu: NULL-check BO's backing store when determining GFX12 PTE flags
  drm/amd/amdkfd: Evict all queues even HWS remove queue failed
  drm/i915: Increase I915_PARAM_MMAP_GTT_VERSION version to indicate support for partial mmaps
  drm/dp_mst: Fix locking when skipping CSN before topology probing
  drm/amdgpu/vce2: fix ip block reference
  drm/amd/display: Fix slab-use-after-free on hdcp_work
  drm/amd/display: Assign normalized_pix_clk when color depth = 14
  drm/amd/display: Restore correct backlight brightness after a GPU reset
  drm/amd/display: fix default brightness
  drm/amd/display: Disable unneeded hpd interrupts during dm_init
  drm/amd: Keep display off while going into S4
  drm/amd/display: fix missing .is_two_pixels_per_container
  drm/amdgpu/display: Allow DCC for video formats on GFX12
  drm/xe: remove redundant check in xe_vm_create_ioctl()
  drm/atomic: Filter out redundant DPMS calls
  drm/xe/guc_pc: Retry and wait longer for GuC PC start
  drm/xe/pm: Temporarily disable D3Cold on BMG
  drm/i915/cdclk: Do cdclk post plane programming later
  drm/xe/userptr: Fix an incorrect assert
  drm/xe: Release guc ids before cancelling work
  ...
@@ -2555,7 +2555,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
 	int r;
 
 	r = amdgpu_device_suspend(drm_dev, true);
-	adev->in_s4 = false;
 	if (r)
 		return r;
 
@@ -2567,8 +2566,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
 static int amdgpu_pmops_thaw(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;
 
-	return amdgpu_device_resume(drm_dev, true);
+	r = amdgpu_device_resume(drm_dev, true);
+	adev->in_s4 = false;
+
+	return r;
 }
 
 static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2581,6 +2585,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
 static int amdgpu_pmops_restore(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+	adev->in_s4 = false;
 
 	return amdgpu_device_resume(drm_dev, true);
 }
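The three hunks above are the hibernation fix ("drm/amd: Keep display off while going into S4"): adev->in_s4 now stays set across the whole S4 sequence and is cleared only after resume has run, so resume paths can tell hibernation apart from a normal resume. A minimal userspace sketch of that ordering; all names are illustrative stand-ins, not the amdgpu API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the device structure; not the amdgpu type. */
struct dev_state {
	bool in_s4; /* set while a hibernation (S4) sequence is in flight */
};

static int do_resume(struct dev_state *d)
{
	/* While in_s4 is still true, resume paths can keep the display off. */
	printf("resume: display stays off = %d\n", d->in_s4);
	return 0;
}

static int pmops_thaw(struct dev_state *d)
{
	int r = do_resume(d);

	/* Clear the flag only after resume ran, mirroring the fix above. */
	d->in_s4 = false;
	return r;
}

int main(void)
{
	struct dev_state d = { .in_s4 = true }; /* set earlier, at freeze time */

	return pmops_thaw(&d);
}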
@@ -528,8 +528,9 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
 
 	bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
-	is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
-		(bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
+	is_system = bo->tbo.resource &&
+		(bo->tbo.resource->mem_type == TTM_PL_TT ||
+		 bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
 
 	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
 		*flags |= AMDGPU_PTE_DCC;
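The fix is a plain NULL guard with short-circuit evaluation: a BO whose backing store is gone must not be treated as system memory. A standalone sketch of the same guard, with made-up placeholder types:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative placeholders, not the TTM definitions. */
enum mem_type { MEM_VRAM, MEM_TT, MEM_PREEMPT };

struct resource { enum mem_type mem_type; };

static bool is_system_placement(const struct resource *res)
{
	/* && short-circuits, so res->mem_type is never read when res is NULL. */
	return res && (res->mem_type == MEM_TT || res->mem_type == MEM_PREEMPT);
}

int main(void)
{
	struct resource sys = { .mem_type = MEM_TT };

	return is_system_placement(&sys) && !is_system_placement(NULL) ? 0 : 1;
}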
@@ -284,7 +284,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
 	if (!ip_block)
 		return -EINVAL;
 
@@ -1230,11 +1230,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 		decrement_queue_count(dqm, qpd, q);
 
 		if (dqm->dev->kfd->shared_resources.enable_mes) {
-			retval = remove_queue_mes(dqm, q, qpd);
-			if (retval) {
+			int err;
+
+			err = remove_queue_mes(dqm, q, qpd);
+			if (err) {
 				dev_err(dev, "Failed to evict queue %d\n",
 					q->properties.queue_id);
-				goto out;
+				retval = err;
 			}
 		}
 	}
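The eviction fix swaps an early goto for recording the error and continuing, so one failed HWS queue removal no longer leaves the remaining queues un-evicted. A generic sketch of that keep-going error-accumulation loop, with hypothetical names:

#include <stdio.h>

/* Hypothetical per-item operation that can fail. */
static int evict_one(int queue_id)
{
	return queue_id == 2 ? -5 : 0; /* pretend queue 2 fails */
}

static int evict_all(int nqueues)
{
	int retval = 0;

	for (int q = 0; q < nqueues; q++) {
		int err = evict_one(q);

		if (err) {
			fprintf(stderr, "failed to evict queue %d\n", q);
			retval = err; /* record, but keep evicting the rest */
		}
	}
	return retval;
}

int main(void)
{
	return evict_all(4) ? 1 : 0;
}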
@@ -245,6 +245,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
 static void handle_hpd_rx_irq(void *param);
 
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+					  int bl_idx,
+					  u32 user_brightness);
+
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 				 struct drm_crtc_state *new_crtc_state);
@@ -3371,8 +3375,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
 
 		mutex_unlock(&dm->dc_lock);
 
+		/* set the backlight after a reset */
+		for (i = 0; i < dm->num_of_edps; i++) {
+			if (dm->backlight_dev[i])
+				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+		}
+
 		return 0;
 	}
 
+	/* leave display off for S4 sequence */
+	if (adev->in_s4)
+		return 0;
+
 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
 	dc_state_release(dm_state->context);
 	dm_state->context = dc_state_create(dm->dc, NULL);
@@ -4906,6 +4921,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 	dm->backlight_dev[aconnector->bl_idx] =
 		backlight_device_register(bl_name, aconnector->base.kdev, dm,
 					  &amdgpu_dm_backlight_ops, &props);
+	dm->brightness[aconnector->bl_idx] = props.brightness;
 
 	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
 		DRM_ERROR("DM: Backlight registration failed!\n");
@@ -4973,7 +4989,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
 	aconnector->bl_idx = bl_idx;
 
 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
-	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
 	dm->backlight_link[bl_idx] = link;
 	dm->num_of_edps++;
 
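These hunks share one mechanism: dm caches the last requested level per eDP panel in dm->brightness[], seeds it from the registered device's actual value instead of a hard-coded maximum, and replays it after a GPU reset. A toy model of the cache-and-replay flow; the struct and function names are invented for the sketch:

#include <stdio.h>

#define MAX_EDPS 2

/* Toy display-manager state; not the amdgpu_display_manager layout. */
struct dm {
	int num_of_edps;
	unsigned int brightness[MAX_EDPS]; /* last level requested by userspace */
};

static void backlight_set_level(struct dm *dm, int idx, unsigned int level)
{
	dm->brightness[idx] = level;            /* cache first ... */
	printf("panel %d -> %u\n", idx, level); /* ... then program hardware */
}

/* After a reset the hardware forgot everything; replay the cached levels. */
static void resume_replay(struct dm *dm)
{
	for (int i = 0; i < dm->num_of_edps; i++)
		backlight_set_level(dm, i, dm->brightness[i]);
}

int main(void)
{
	struct dm dm = { .num_of_edps = 1, .brightness = { 128 } };

	resume_replay(&dm);
	return 0;
}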
@@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
 	for (i = 0; i < hdcp_work->max_link; i++) {
 		cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+		cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
 	}
 
 	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
@@ -894,8 +894,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
+	int irq_type;
 	int i;
 
+	/* First, clear all hpd and hpdrx interrupts */
+	for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+		if (!dc_interrupt_set(adev->dm.dc, i, false))
+			drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+				i);
+	}
+
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
 		struct amdgpu_dm_connector *amdgpu_dm_connector;
@@ -908,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 
 		dc_link = amdgpu_dm_connector->dc_link;
 
+		/*
+		 * Get a base driver irq reference for hpd ints for the lifetime
+		 * of dm. Note that only hpd interrupt types are registered with
+		 * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+		 * hpd_rx isn't available. DM currently controls hpd_rx
+		 * explicitly with dc_interrupt_set()
+		 */
 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
-			dc_interrupt_set(adev->dm.dc,
-					 dc_link->irq_source_hpd,
-					 true);
+			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+			/*
+			 * TODO: There's a mismatch between mode_info.num_hpd
+			 * and what bios reports as the # of connectors with hpd
+			 * sources. Since the # of hpd source types registered
+			 * with base driver == mode_info.num_hpd, we have to
+			 * fallback to dc_interrupt_set for the remaining types.
+			 */
+			if (irq_type < adev->mode_info.num_hpd) {
+				if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+					drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+						dc_link->irq_source_hpd);
+			} else {
+				dc_interrupt_set(adev->dm.dc,
+						 dc_link->irq_source_hpd,
+						 true);
+			}
 		}
 
 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -921,12 +950,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-
-	/* Update reference counts for HPDs */
-	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
-		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
-			drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
-	}
 }
 
 /**
@@ -942,7 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
-	int i;
+	int irq_type;
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -956,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 		dc_link = amdgpu_dm_connector->dc_link;
 
 		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
-			dc_interrupt_set(adev->dm.dc,
-					 dc_link->irq_source_hpd,
-					 false);
+			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+			/* TODO: See same TODO in amdgpu_dm_hpd_init() */
+			if (irq_type < adev->mode_info.num_hpd) {
+				if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+					drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
						dc_link->irq_source_hpd);
+			} else {
+				dc_interrupt_set(adev->dm.dc,
+						 dc_link->irq_source_hpd,
+						 false);
+			}
 		}
 
 		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -968,10 +1000,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-
-	/* Update reference counts for HPDs */
-	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
-		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
-			drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
-	}
 }
@@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
 	if (!dcc->enable)
 		return 0;
 
-	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
-	    !dc->cap_funcs.get_dcc_compression_cap)
+	if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+	    format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 		return -EINVAL;
 
+	if (!dc->cap_funcs.get_dcc_compression_cap)
+		return -EINVAL;
+
 	input.format = format;
@@ -3389,10 +3389,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
 		break;
 	case COLOR_DEPTH_121212:
 		normalized_pix_clk = (pix_clk * 36) / 24;
 		break;
+	case COLOR_DEPTH_141414:
+		normalized_pix_clk = (pix_clk * 42) / 24;
+		break;
 	case COLOR_DEPTH_161616:
 		normalized_pix_clk = (pix_clk * 48) / 24;
 		break;
 	default:
 		ASSERT(0);
 		break;
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
 			dce60_timing_generator_enable_advanced_request,
 	.configure_crc = dce60_configure_crc,
 	.get_crc = dce110_get_crc,
+	.is_two_pixels_per_container = dce110_is_two_pixels_per_container,
 };
 
 void dce60_timing_generator_construct(
@@ -4025,6 +4025,22 @@ out:
 	return 0;
 }
 
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+	bool probing_done = false;
+
+	mutex_lock(&mgr->lock);
+
+	if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+		probing_done = mgr->mst_primary->link_address_sent;
+		drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+	}
+
+	mutex_unlock(&mgr->lock);
+
+	return probing_done;
+}
+
 static inline bool
 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
 			  struct drm_dp_pending_up_req *up_req)
@@ -4055,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
 
 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
-		dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
-		hotplug = true;
+		if (!primary_mstb_probing_is_done(mgr)) {
+			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+		} else {
+			dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+			hotplug = true;
+		}
 	}
 
 	drm_dp_mst_topology_put_mstb(mstb);
@@ -4138,10 +4158,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 	drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
				 false);
 
+	drm_dp_mst_topology_put_mstb(mst_primary);
+
 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
 		const struct drm_dp_connection_status_notify *conn_stat =
 			&up_req->msg.u.conn_stat;
-		bool handle_csn;
 
 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
 			    conn_stat->port_number,
@@ -4150,16 +4171,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			    conn_stat->message_capability_status,
 			    conn_stat->input_port,
 			    conn_stat->peer_device_type);
-
-		mutex_lock(&mgr->probe_lock);
-		handle_csn = mst_primary->link_address_sent;
-		mutex_unlock(&mgr->probe_lock);
-
-		if (!handle_csn) {
-			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
-			kfree(up_req);
-			goto out_put_primary;
-		}
 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
 		const struct drm_dp_resource_status_notify *res_stat =
 			&up_req->msg.u.resource_stat;
@@ -4174,9 +4185,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 	list_add_tail(&up_req->next, &mgr->up_req_list);
 	mutex_unlock(&mgr->up_req_lock);
 	queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
-	drm_dp_mst_topology_put_mstb(mst_primary);
 out_clear_reply:
 	reset_msg_rx_state(&mgr->up_req_recv);
 	return ret;
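The locking fix moves the link_address_sent check out of drm_dp_mst_handle_up_req() (where it was read under probe_lock) into a helper that samples it under mgr->lock while holding a temporary reference on the primary branch. A compact pthread model of the sample-a-flag-under-the-owner's-lock shape, with illustrative types:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative manager; the real code also takes a refcount on the branch. */
struct mgr {
	pthread_mutex_t lock;
	bool primary_present;
	bool link_address_sent;
};

static bool probing_is_done(struct mgr *m)
{
	bool done = false;

	pthread_mutex_lock(&m->lock);
	if (m->primary_present) /* only touch state this lock protects */
		done = m->link_address_sent;
	pthread_mutex_unlock(&m->lock);

	return done; /* a snapshot: valid at the time of sampling */
}

int main(void)
{
	struct mgr m = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .primary_present = true, .link_address_sent = true };

	return probing_is_done(&m) ? 0 : 1;
}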
@@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
 
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
+
+	if (connector->dpms == mode)
+		goto out;
+
 	connector->dpms = mode;
 
 	crtc = connector->state->crtc;
@@ -1427,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
  * callback. For atomic drivers the remapping to the "ACTIVE" property is
  * implemented in the DRM core.
  *
+ * On atomic drivers any DPMS setproperty ioctl where the value does not
+ * change is completely skipped, otherwise a full atomic commit will occur.
+ * On legacy drivers the exact behavior is driver specific.
+ *
  * Note that this property cannot be set through the MODE_ATOMIC ioctl,
  * userspace must use "ACTIVE" on the CRTC instead.
  *
@@ -545,7 +545,7 @@ impl EncodedMsg<'_> {
         }
         self.push(&mut offset, (MODE_STOP, 4));
 
-        let pad_offset = (offset + 7) / 8;
+        let pad_offset = offset.div_ceil(8);
         for i in pad_offset..self.version.max_data() {
             self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
         }
@@ -659,7 +659,7 @@ struct QrImage<'a> {
 impl QrImage<'_> {
     fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
         let width = em.version.width();
-        let stride = (width + 7) / 8;
+        let stride = width.div_ceil(8);
         let data = qrdata;
 
         let mut qr_image = QrImage {
@@ -911,16 +911,16 @@ impl QrImage<'_> {
 ///
 /// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
 /// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-///           will be encoded as binary segment, otherwise it will be encoded
-///           efficiently as a numeric segment, and appended to the URL.
+///   will be encoded as binary segment, otherwise it will be encoded
+///   efficiently as a numeric segment, and appended to the URL.
 /// * `data_len`: Length of the data, that needs to be encoded, must be less
-///               than data_size.
+///   than data_size.
 /// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-///                a V40 QR code. It will then be overwritten with the QR code image.
+///   a V40 QR code. It will then be overwritten with the QR code image.
 /// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-///          segments and ECC.
+///   segments and ECC.
 /// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-///               long for V40.
+///   long for V40.
 ///
 /// # Safety
 ///
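Both Rust hunks are the same clippy cleanup: (x + 7) / 8 is the classic round-up-to-whole-bytes idiom, and div_ceil(8) states the intent directly. Kernel C spells the same computation DIV_ROUND_UP; a tiny self-check in C:

#include <assert.h>

/* Same definition as the kernel macro of this name. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 17 bits need 3 bytes; both spellings agree for non-negative n. */
	assert(DIV_ROUND_UP(17, 8) == 3);
	assert((17 + 7) / 8 == 3);
	return 0;
}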
@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
 					    0, PCI_DEVFN(2, 0));
 	int ret = -1;
 
+	if (pci_gfx_root == NULL) {
+		WARN_ON(1);
+		return;
+	}
+
 	/* Get the address of the platform config vbt */
 	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
 	pci_dev_put(pci_gfx_root);
@@ -7830,9 +7830,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
 	intel_program_dpkgc_latency(state);
 
-	if (state->modeset)
-		intel_set_cdclk_post_plane_update(state);
-
 	intel_wait_for_vblank_workers(state);
 
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7906,6 +7903,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		intel_verify_planes(state);
 
 	intel_sagv_post_plane_update(state);
+	if (state->modeset)
+		intel_set_cdclk_post_plane_update(state);
 	intel_pmdemand_post_plane_update(state);
 
 	drm_atomic_helper_commit_hw_done(&state->base);
@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 4 - Support multiple fault handlers per object depending on object's
  *     backing storage (a.k.a. MMAP_OFFSET).
  *
+ * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple
+ *     times with different size and offset).
+ *
  * Restrictions:
  *
  * * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-	return 4;
+	return 5;
 }
 
 static inline struct i915_gtt_view
@@ -6,6 +6,7 @@
 #include "xe_guc_pc.h"
 
+#include <linux/delay.h>
 #include <linux/ktime.h>
 
 #include <drm/drm_managed.h>
 #include <generated/xe_wa_oob.h>
@@ -19,6 +20,7 @@
 #include "xe_gt.h"
 #include "xe_gt_idle.h"
 #include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
 #include "xe_gt_types.h"
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
@@ -49,6 +51,9 @@
 #define LNL_MERT_FREQ_CAP 800
 #define BMG_MERT_FREQ_CAP 2133
 
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
 /**
  * DOC: GuC Power Conservation (PC)
  *
@@ -113,9 +118,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
 		FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
 
 static int wait_for_pc_state(struct xe_guc_pc *pc,
-			     enum slpc_global_state state)
+			     enum slpc_global_state state,
+			     int timeout_ms)
 {
-	int timeout_us = 5000; /* rought 5ms, but no need for precision */
+	int timeout_us = 1000 * timeout_ms;
 	int slept, wait = 10;
 
 	xe_device_assert_mem_access(pc_to_xe(pc));
@@ -164,7 +170,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
 	};
 	int ret;
 
-	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+			      SLPC_RESET_TIMEOUT_MS))
 		return -EAGAIN;
 
 	/* Blocking here to ensure the results are ready before reading them */
@@ -187,7 +194,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
 	};
 	int ret;
 
-	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+			      SLPC_RESET_TIMEOUT_MS))
 		return -EAGAIN;
 
 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -208,7 +216,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
 	struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
 	int ret;
 
-	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+			      SLPC_RESET_TIMEOUT_MS))
 		return -EAGAIN;
 
 	ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -440,6 +449,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
 	return freq;
 }
 
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+	u32 freq;
+
+	freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+	return decode_freq(freq);
+}
+
 /**
  * xe_guc_pc_get_cur_freq - Get Current requested frequency
  * @pc: The GuC PC
@@ -463,10 +481,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
 		return -ETIMEDOUT;
 	}
 
-	*freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
-
-	*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
-	*freq = decode_freq(*freq);
+	*freq = get_cur_freq(gt);
 
 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 	return 0;
@@ -1002,6 +1017,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
 	struct xe_gt *gt = pc_to_gt(pc);
 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
 	unsigned int fw_ref;
+	ktime_t earlier;
 	int ret;
 
 	xe_gt_assert(gt, xe_device_uc_enabled(xe));
@@ -1026,14 +1042,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
 	memset(pc->bo->vmap.vaddr, 0, size);
 	slpc_shared_data_write(pc, header.size, size);
 
+	earlier = ktime_get();
 	ret = pc_action_reset(pc);
 	if (ret)
 		goto out;
 
-	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
-		xe_gt_err(gt, "GuC PC Start failed\n");
-		ret = -EIO;
-		goto out;
+	if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+			      SLPC_RESET_TIMEOUT_MS)) {
+		xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+			   xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+			   xe_gt_throttle_get_limit_reasons(gt));
+
+		if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+				      SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+			xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+			goto out;
+		}
+
+		xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+			   ktime_ms_delta(ktime_get(), earlier));
 	}
 
 	ret = pc_init_freqs(pc);
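The retry change keeps the old ~5 ms expectation as a fast gate, then warns and waits up to a full second before giving up, logging how long the slow path actually took. A userspace sketch of that escalating-timeout pattern; the poll function and timings are stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for wait_for_pc_state(): true once the state was reached. */
static bool wait_for_state(int timeout_ms, int *budget_ms)
{
	int waited = timeout_ms < *budget_ms ? timeout_ms : *budget_ms;

	*budget_ms -= waited; /* pretend the hardware consumes the budget */
	return *budget_ms == 0;
}

static int start_with_retry(void)
{
	int hw_needs_ms = 300; /* slower than normal, faster than broken */

	if (!wait_for_state(5, &hw_needs_ms)) {        /* fast path: ~5 ms */
		fprintf(stderr, "start taking longer than normal\n");

		if (!wait_for_state(1000, &hw_needs_ms)) { /* extended retry */
			fprintf(stderr, "start failed\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return start_with_retry() ? 1 : 0;
}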
@@ -1246,11 +1246,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
 	xe_pm_runtime_get(guc_to_xe(guc));
 	trace_xe_exec_queue_destroy(q);
 
+	release_guc_id(guc, q);
 	if (xe_exec_queue_is_lr(q))
 		cancel_work_sync(&ge->lr_tdr);
+	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
-	release_guc_id(guc, q);
 	xe_sched_entity_fini(&ge->entity);
 	xe_sched_fini(&ge->sched);
 
@@ -138,13 +138,17 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 		i += size;
 
 		if (unlikely(j == st->nents - 1)) {
+			xe_assert(xe, i >= npages);
 			if (i > npages)
 				size -= (i - npages);
+
 			sg_mark_end(sgl);
+		} else {
+			xe_assert(xe, i < npages);
 		}
+
 		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
 	}
-	xe_assert(xe, i == npages);
 
 	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
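The userptr fix accepts that the running page count i may legitimately overshoot npages on the final scatter entry and clamps that entry, instead of asserting exact equality after the loop. A standalone sketch of clamping the last chunk of a split range (sizes are arbitrary):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int npages = 10;
	const int chunk = 4; /* pages per scatter entry */
	int i = 0;

	for (int j = 0; i < npages; j++) {
		int size = chunk;

		i += size;
		if (i >= npages) {              /* last entry: may overshoot ... */
			if (i > npages)
				size -= i - npages; /* ... so trim it back */
		} else {
			assert(i < npages);     /* interior entries never overshoot */
		}
		printf("entry %d: %d pages\n", j, size);
	}
	return 0;
}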
@@ -267,6 +267,15 @@ int xe_pm_init_early(struct xe_device *xe)
 }
 ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
 
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+	/* FIXME: D3Cold temporarily disabled by default on BMG */
+	if (xe->info.platform == XE_BATTLEMAGE)
+		return 0;
+
+	return DEFAULT_VRAM_THRESHOLD;
+}
+
 /**
  * xe_pm_init - Initialize Xe Power Management
  * @xe: xe device instance
@@ -277,6 +286,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
  */
 int xe_pm_init(struct xe_device *xe)
 {
+	u32 vram_threshold;
 	int err;
 
 	/* For now suspend/resume is only allowed with GuC */
@@ -290,7 +300,8 @@ int xe_pm_init(struct xe_device *xe)
 		if (err)
 			return err;
 
-		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+		vram_threshold = vram_threshold_value(xe);
+		err = xe_pm_set_vram_threshold(xe, vram_threshold);
 		if (err)
 			return err;
 	}
@@ -1809,9 +1809,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 			 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
 		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, args->extensions))
-		return -EINVAL;
-
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)