From 4f059d514f7119a4fdd9934189ff31f2c26b2647 Mon Sep 17 00:00:00 2001
From: Andrew Banman
Date: Wed, 21 Sep 2016 11:09:21 -0500
Subject: [PATCH] x86/platform/uv/BAU: Add UV4-specific functions

Add the UV4-specific function definitions and define an operations struct
to implement them in the BAU driver.

Many BAU MMRs, although functionally the same, have new addresses on UV4
due to hardware changes. Each MMR requires new read/write functions, but
their implementation in the driver does not change. Thus, it is enough to
enumerate them in the operations struct for the changes to take effect.

Signed-off-by: Andrew Banman
Acked-by: Thomas Gleixner
Acked-by: Mike Travis
Acked-by: Dimitri Sivanich
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: akpm@linux-foundation.org
Cc: rja@sgi.com
Link: http://lkml.kernel.org/r/1474474161-265604-11-git-send-email-abanman@sgi.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/uv/uv_bau.h | 30 ++++++++++++++++++++++++++++++
 arch/x86/platform/uv/tlb_uv.c    | 15 ++++++++++++++-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a7a93a5beb00..57ab86d94d64 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -664,6 +664,16 @@ static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
 	write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
 }
 
+static inline void write_mmr_proc_payload_first(int pnode, unsigned long mmr_image)
+{
+	write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_FIRST, mmr_image);
+}
+
+static inline void write_mmr_proc_payload_last(int pnode, unsigned long mmr_image)
+{
+	write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_LAST, mmr_image);
+}
+
 static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
@@ -709,6 +719,26 @@ static inline unsigned long read_gmmr_sw_ack(int pnode)
 	return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
 }
 
+static inline void write_mmr_proc_sw_ack(unsigned long mr)
+{
+	uv_write_local_mmr(UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
+}
+
+static inline void write_gmmr_proc_sw_ack(int pnode, unsigned long mr)
+{
+	write_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
+}
+
+static inline unsigned long read_mmr_proc_sw_ack(void)
+{
+	return read_lmmr(UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
+}
+
+static inline unsigned long read_gmmr_proc_sw_ack(int pnode)
+{
+	return read_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
+}
+
 static inline void write_mmr_data_config(int pnode, unsigned long mr)
 {
 	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 7ca0e5c31477..56c5a3a3884a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -36,6 +36,17 @@ static struct bau_operations uv123_bau_ops = {
 	.write_payload_last = write_mmr_payload_last,
 };
 
+static struct bau_operations uv4_bau_ops = {
+	.bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
+	.read_l_sw_ack = read_mmr_proc_sw_ack,
+	.read_g_sw_ack = read_gmmr_proc_sw_ack,
+	.write_l_sw_ack = write_mmr_proc_sw_ack,
+	.write_g_sw_ack = write_gmmr_proc_sw_ack,
+	.write_payload_first = write_mmr_proc_payload_first,
+	.write_payload_last = write_mmr_proc_payload_last,
+};
+
+
 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
 static int timeout_base_ns[] = {
 		20,
@@ -2158,7 +2169,9 @@ static int __init uv_bau_init(void)
 	if (!is_uv_system())
 		return 0;
 
-	if (is_uv3_hub())
+	if (is_uv4_hub())
+		ops = uv4_bau_ops;
+	else if (is_uv3_hub())
 		ops = uv123_bau_ops;
 	else if (is_uv2_hub())
 		ops = uv123_bau_ops;
-- 
2.20.1
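
For readers unfamiliar with the bau_operations pattern the patch relies on, the sketch below shows, outside the kernel tree, how a function-pointer table of this shape lets the init path pick hub-specific MMR accessors once, so the rest of the driver calls through the table without ever checking the hub type. This is a minimal standalone sketch, not the driver's real code: the register constants, accessor bodies, printf output, and the is_uv4_hub() stub are hypothetical stand-ins; only the struct-plus-selection shape mirrors the patch.

/*
 * Minimal illustration of selecting per-hub accessors through an ops
 * table. Builds as an ordinary user-space C program.
 */
#include <stdio.h>

/* Hypothetical offsets standing in for the UVH_/UV4H_ MMR constants. */
#define FAKE_UV3_SW_ACK_MMR	0x320088UL
#define FAKE_UV4_SW_ACK_MMR	0x0a0060UL

struct bau_operations {
	unsigned long (*read_l_sw_ack)(void);
	void (*write_l_sw_ack)(unsigned long mr);
};

/* Pre-UV4 style accessors (placeholder bodies, no real MMIO). */
static unsigned long read_mmr_sw_ack(void)
{
	return FAKE_UV3_SW_ACK_MMR;
}

static void write_mmr_sw_ack(unsigned long mr)
{
	printf("pre-UV4 sw_ack write: 0x%lx\n", mr);
}

/* UV4 style accessors hitting the relocated registers. */
static unsigned long read_mmr_proc_sw_ack(void)
{
	return FAKE_UV4_SW_ACK_MMR;
}

static void write_mmr_proc_sw_ack(unsigned long mr)
{
	printf("UV4 sw_ack write: 0x%lx\n", mr);
}

static const struct bau_operations uv123_bau_ops = {
	.read_l_sw_ack = read_mmr_sw_ack,
	.write_l_sw_ack = write_mmr_sw_ack,
};

static const struct bau_operations uv4_bau_ops = {
	.read_l_sw_ack = read_mmr_proc_sw_ack,
	.write_l_sw_ack = write_mmr_proc_sw_ack,
};

/* Driver-wide ops table, filled in once at init time. */
static struct bau_operations ops;

/* Stub: pretend we are running on a UV4 hub. */
static int is_uv4_hub(void)
{
	return 1;
}

int main(void)
{
	/* Mirror of the uv_bau_init() selection added by the patch. */
	if (is_uv4_hub())
		ops = uv4_bau_ops;
	else
		ops = uv123_bau_ops;

	/* Callers dispatch through the table, unaware of the hub type. */
	ops.write_l_sw_ack(0x3UL);
	printf("pending sw_ack: 0x%lx\n", ops.read_l_sw_ack());
	return 0;
}

Because only the table contents differ between hub generations, adding UV4 support amounts to defining the new accessors and enumerating them in uv4_bau_ops; no call site elsewhere in the driver needs to change.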