Merge tag 'cxl-fixes-6.19-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull Compute Express Link (CXL) fixes from Dave Jiang: - Recognize all ZONE_DEVICE users as physaddr consumers - Fix format string for extended_linear_cache_size_show() - Fix target list setup for multiple decoders sharing the same downstream port - Restore HBIW check before dereferencing platform data - Fix potential infinite loop in __cxl_dpa_reserve() - Check for invalid addresses returned from translation functions on error * tag 'cxl-fixes-6.19-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: cxl: Check for invalid addresses returned from translation functions on errors cxl/hdm: Fix potential infinite loop in __cxl_dpa_reserve() cxl/acpi: Restore HBIW check before dereferencing platform_data cxl/port: Fix target list setup for multiple decoders sharing the same dport cxl/region: fix format string for resource_size_t x86/kaslr: Recognize all ZONE_DEVICE users as physaddr consumers
This commit is contained in:
@@ -115,12 +115,12 @@ void __init kernel_randomize_memory(void)
|
||||
|
||||
/*
|
||||
* Adapt physical memory region size based on available memory,
|
||||
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
|
||||
* device BAR space assuming the direct map space is large enough
|
||||
* for creating a ZONE_DEVICE mapping in the direct map corresponding
|
||||
* to the physical BAR address.
|
||||
* except when CONFIG_ZONE_DEVICE is enabled. ZONE_DEVICE wants to map
|
||||
* any physical address into the direct-map. KASLR wants to reliably
|
||||
* steal some physical address bits. Those design choices are in direct
|
||||
* conflict.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
|
||||
if (!IS_ENABLED(CONFIG_ZONE_DEVICE) && (memory_tb < kaslr_regions[0].size_tb))
|
||||
kaslr_regions[0].size_tb = memory_tb;
|
||||
|
||||
/*
|
||||
|
||||
@@ -75,9 +75,16 @@ EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");
|
||||
|
||||
static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
|
||||
{
|
||||
struct cxl_cxims_data *cximsd = cxlrd->platform_data;
|
||||
int hbiw = cxlrd->cxlsd.nr_targets;
|
||||
struct cxl_cxims_data *cximsd;
|
||||
|
||||
return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
|
||||
/* No xormaps for host bridge interleave ways of 1 or 3 */
|
||||
if (hbiw == 1 || hbiw == 3)
|
||||
return addr;
|
||||
|
||||
cximsd = cxlrd->platform_data;
|
||||
|
||||
return cxl_do_xormap_calc(cximsd, addr, hbiw);
|
||||
}
|
||||
|
||||
struct cxl_cxims_context {
|
||||
|
||||
@@ -403,7 +403,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
|
||||
* is not set.
|
||||
*/
|
||||
if (cxled->part < 0)
|
||||
for (int i = 0; cxlds->nr_partitions; i++)
|
||||
for (int i = 0; i < cxlds->nr_partitions; i++)
|
||||
if (resource_contains(&cxlds->part[i].res, res)) {
|
||||
cxled->part = i;
|
||||
break;
|
||||
@@ -530,7 +530,7 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
|
||||
|
||||
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
|
||||
{
|
||||
resource_size_t base = -1;
|
||||
resource_size_t base = RESOURCE_SIZE_MAX;
|
||||
|
||||
lockdep_assert_held(&cxl_rwsem.dpa);
|
||||
if (cxled->dpa_res)
|
||||
|
||||
@@ -1590,7 +1590,7 @@ static int update_decoder_targets(struct device *dev, void *data)
|
||||
cxlsd->target[i] = dport;
|
||||
dev_dbg(dev, "dport%d found in target list, index %d\n",
|
||||
dport->port_id, i);
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -759,7 +759,7 @@ static ssize_t extended_linear_cache_size_show(struct device *dev,
|
||||
ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
|
||||
if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
|
||||
return rc;
|
||||
return sysfs_emit(buf, "%#llx\n", p->cache_size);
|
||||
return sysfs_emit(buf, "%pap\n", &p->cache_size);
|
||||
}
|
||||
static DEVICE_ATTR_RO(extended_linear_cache_size);
|
||||
|
||||
@@ -3118,7 +3118,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
|
||||
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
|
||||
struct cxl_region_params *p = &cxlr->params;
|
||||
struct cxl_endpoint_decoder *cxled = NULL;
|
||||
u64 dpa_offset, hpa_offset, hpa;
|
||||
u64 base, dpa_offset, hpa_offset, hpa;
|
||||
u16 eig = 0;
|
||||
u8 eiw = 0;
|
||||
int pos;
|
||||
@@ -3136,8 +3136,14 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
|
||||
ways_to_eiw(p->interleave_ways, &eiw);
|
||||
granularity_to_eig(p->interleave_granularity, &eig);
|
||||
|
||||
dpa_offset = dpa - cxl_dpa_resource_start(cxled);
|
||||
base = cxl_dpa_resource_start(cxled);
|
||||
if (base == RESOURCE_SIZE_MAX)
|
||||
return ULLONG_MAX;
|
||||
|
||||
dpa_offset = dpa - base;
|
||||
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
|
||||
if (hpa_offset == ULLONG_MAX)
|
||||
return ULLONG_MAX;
|
||||
|
||||
/* Apply the hpa_offset to the region base address */
|
||||
hpa = hpa_offset + p->res->start + p->cache_size;
|
||||
@@ -3146,6 +3152,9 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
|
||||
if (cxlrd->ops.hpa_to_spa)
|
||||
hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
|
||||
|
||||
if (hpa == ULLONG_MAX)
|
||||
return ULLONG_MAX;
|
||||
|
||||
if (!cxl_resource_contains_addr(p->res, hpa)) {
|
||||
dev_dbg(&cxlr->dev,
|
||||
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
|
||||
@@ -3170,7 +3179,8 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
|
||||
struct cxl_region_params *p = &cxlr->params;
|
||||
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
|
||||
struct cxl_endpoint_decoder *cxled;
|
||||
u64 hpa, hpa_offset, dpa_offset;
|
||||
u64 hpa_offset = offset;
|
||||
u64 dpa, dpa_offset;
|
||||
u16 eig = 0;
|
||||
u8 eiw = 0;
|
||||
int pos;
|
||||
@@ -3187,10 +3197,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
|
||||
* CXL HPA is assumed to equal SPA.
|
||||
*/
|
||||
if (cxlrd->ops.spa_to_hpa) {
|
||||
hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
|
||||
hpa_offset = hpa - p->res->start;
|
||||
} else {
|
||||
hpa_offset = offset;
|
||||
hpa_offset = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
|
||||
if (hpa_offset == ULLONG_MAX) {
|
||||
dev_dbg(&cxlr->dev, "HPA not found for %pr offset %#llx\n",
|
||||
p->res, offset);
|
||||
return -ENXIO;
|
||||
}
|
||||
hpa_offset -= p->res->start;
|
||||
}
|
||||
|
||||
pos = cxl_calculate_position(hpa_offset, eiw, eig);
|
||||
@@ -3207,8 +3220,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
|
||||
cxled = p->targets[i];
|
||||
if (cxled->pos != pos)
|
||||
continue;
|
||||
|
||||
dpa = cxl_dpa_resource_start(cxled);
|
||||
if (dpa != RESOURCE_SIZE_MAX)
|
||||
dpa += dpa_offset;
|
||||
|
||||
result->cxlmd = cxled_to_memdev(cxled);
|
||||
result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
|
||||
result->dpa = dpa;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -225,12 +225,6 @@ config PCI_P2PDMA
|
||||
P2P DMA transactions must be between devices behind the same root
|
||||
port.
|
||||
|
||||
Enabling this option will reduce the entropy of x86 KASLR memory
|
||||
regions. For example - on a 46 bit system, the entropy goes down
|
||||
from 16 bits to 15 bits. The actual reduction in entropy depends
|
||||
on the physical address bits, on processor features, kernel config
|
||||
(5 level page table) and physical memory present on the system.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config PCI_LABEL
|
||||
|
||||
10
mm/Kconfig
10
mm/Kconfig
@@ -1220,10 +1220,14 @@ config ZONE_DEVICE
|
||||
Device memory hotplug support allows for establishing pmem,
|
||||
or other device driver discovered memory regions, in the
|
||||
memmap. This allows pfn_to_page() lookups of otherwise
|
||||
"device-physical" addresses which is needed for using a DAX
|
||||
mapping in an O_DIRECT operation, among other things.
|
||||
"device-physical" addresses which is needed for DAX, PCI_P2PDMA, and
|
||||
DEVICE_PRIVATE features among others.
|
||||
|
||||
If FS_DAX is enabled, then say Y.
|
||||
Enabling this option will reduce the entropy of x86 KASLR memory
|
||||
regions. For example - on a 46 bit system, the entropy goes down
|
||||
from 16 bits to 15 bits. The actual reduction in entropy depends
|
||||
on the physical address bits, on processor features, kernel config
|
||||
(5 level page table) and physical memory present on the system.
|
||||
|
||||
#
|
||||
# Helpers to mirror range of the CPU page tables of a process into device page
|
||||
|
||||
@@ -68,6 +68,8 @@ static u64 to_hpa(u64 dpa_offset, int pos, u8 r_eiw, u16 r_eig, u8 hb_ways,
|
||||
|
||||
/* Calculate base HPA offset from DPA and position */
|
||||
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, r_eiw, r_eig);
|
||||
if (hpa_offset == ULLONG_MAX)
|
||||
return ULLONG_MAX;
|
||||
|
||||
if (math == XOR_MATH) {
|
||||
cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
|
||||
@@ -258,21 +260,25 @@ static int test_random_params(void)
|
||||
pos = get_random_u32() % ways;
|
||||
dpa = get_random_u64() >> 12;
|
||||
|
||||
reverse_dpa = ULLONG_MAX;
|
||||
reverse_pos = -1;
|
||||
|
||||
hpa = cxl_calculate_hpa_offset(dpa, pos, eiw, eig);
|
||||
if (hpa != ULLONG_MAX) {
|
||||
reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
|
||||
reverse_pos = cxl_calculate_position(hpa, eiw, eig);
|
||||
if (reverse_dpa == dpa && reverse_pos == pos)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (reverse_dpa != dpa || reverse_pos != pos) {
|
||||
pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
|
||||
i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw,
|
||||
eig);
|
||||
i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw, eig);
|
||||
|
||||
if (failures++ > 10) {
|
||||
pr_err("test random too many failures, stop\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
pr_info("..... test random: PASS %d FAIL %d\n", i - failures, failures);
|
||||
|
||||
if (failures)
|
||||
|
||||
Reference in New Issue
Block a user