x86/platform/uv/BAU: Clean up vertical alignment
author		Andrew Banman <abanman@sgi.com>		Wed, 21 Sep 2016 16:09:12 +0000 (11:09 -0500)
committer	Ingo Molnar <mingo@kernel.org>		Thu, 22 Sep 2016 09:16:12 +0000 (11:16 +0200)
Fix the whitespace in blocks of code so that assignments and initializer entries are vertically aligned.
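
A minimal sketch of the convention being applied (identifiers are taken
from this file; surrounding declarations are elided):

	/* Table initializers: pad after each name so the value
	 * column lines up.
	 */
	static struct tunables tunables[] = {
		{&max_concurr,   MAX_BAU_CONCURRENT},
		{&plugged_delay, PLUGGED_DELAY},
	};

	/* Assignments: pad before the '=' so the '=' signs form the
	 * aligned column, rather than padding after the '='.
	 */
	bcp->max_concurr   = max_concurr;
	bcp->plugged_delay = plugged_delay;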

Signed-off-by: Andrew Banman <abanman@sgi.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Mike Travis <travis@sgi.com>
Acked-by: Dimitri Sivanich <sivanich@sgi.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: akpm@linux-foundation.org
Cc: rja@sgi.com
Link: http://lkml.kernel.org/r/1474474161-265604-2-git-send-email-abanman@sgi.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/platform/uv/tlb_uv.c

diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index fdb4d42..b84c2a2 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -55,16 +55,16 @@ static int congested_reps   = CONGESTED_REPS;
 static int disabled_period     = DISABLED_PERIOD;
 
 static struct tunables tunables[] = {
-       {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
-       {&plugged_delay, PLUGGED_DELAY},
-       {&plugsb4reset, PLUGSB4RESET},
-       {&timeoutsb4reset, TIMEOUTSB4RESET},
-       {&ipi_reset_limit, IPI_RESET_LIMIT},
-       {&complete_threshold, COMPLETE_THRESHOLD},
-       {&congested_respns_us, CONGESTED_RESPONSE_US},
-       {&congested_reps, CONGESTED_REPS},
-       {&disabled_period, DISABLED_PERIOD},
-       {&giveup_limit, GIVEUP_LIMIT}
+       {&max_concurr,           MAX_BAU_CONCURRENT}, /* must be [0] */
+       {&plugged_delay,         PLUGGED_DELAY},
+       {&plugsb4reset,          PLUGSB4RESET},
+       {&timeoutsb4reset,       TIMEOUTSB4RESET},
+       {&ipi_reset_limit,       IPI_RESET_LIMIT},
+       {&complete_threshold,    COMPLETE_THRESHOLD},
+       {&congested_respns_us,   CONGESTED_RESPONSE_US},
+       {&congested_reps,        CONGESTED_REPS},
+       {&disabled_period,       DISABLED_PERIOD},
+       {&giveup_limit,          GIVEUP_LIMIT}
 };
 
 static struct dentry *tunables_dir;
@@ -1619,17 +1619,17 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
 
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
-               bcp->max_concurr =              max_concurr;
-               bcp->max_concurr_const =        max_concurr;
-               bcp->plugged_delay =            plugged_delay;
-               bcp->plugsb4reset =             plugsb4reset;
-               bcp->timeoutsb4reset =          timeoutsb4reset;
-               bcp->ipi_reset_limit =          ipi_reset_limit;
-               bcp->complete_threshold =       complete_threshold;
-               bcp->cong_response_us =         congested_respns_us;
-               bcp->cong_reps =                congested_reps;
-               bcp->disabled_period =          sec_2_cycles(disabled_period);
-               bcp->giveup_limit =             giveup_limit;
+               bcp->max_concurr         = max_concurr;
+               bcp->max_concurr_const   = max_concurr;
+               bcp->plugged_delay       = plugged_delay;
+               bcp->plugsb4reset        = plugsb4reset;
+               bcp->timeoutsb4reset     = timeoutsb4reset;
+               bcp->ipi_reset_limit     = ipi_reset_limit;
+               bcp->complete_threshold  = complete_threshold;
+               bcp->cong_response_us    = congested_respns_us;
+               bcp->cong_reps           = congested_reps;
+               bcp->disabled_period     = sec_2_cycles(disabled_period);
+               bcp->giveup_limit        = giveup_limit;
        }
        return count;
 }
@@ -1740,7 +1740,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
                memset(bd2, 0, sizeof(struct bau_desc));
                if (uv1) {
                        uv1_hdr = &bd2->header.uv1_hdr;
-                       uv1_hdr->swack_flag =   1;
+                       uv1_hdr->swack_flag = 1;
                        /*
                         * The base_dest_nasid set in the message header
                         * is the nasid of the first uvhub in the partition.
@@ -1749,10 +1749,10 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
                         * if nasid striding is being used.
                         */
                        uv1_hdr->base_dest_nasid =
-                                               UV_PNODE_TO_NASID(base_pnode);
-                       uv1_hdr->dest_subnodeid =       UV_LB_SUBNODEID;
-                       uv1_hdr->command =              UV_NET_ENDPOINT_INTD;
-                       uv1_hdr->int_both =             1;
+                                                 UV_PNODE_TO_NASID(base_pnode);
+                       uv1_hdr->dest_subnodeid  = UV_LB_SUBNODEID;
+                       uv1_hdr->command         = UV_NET_ENDPOINT_INTD;
+                       uv1_hdr->int_both        = 1;
                        /*
                         * all others need to be set to zero:
                         *   fairness chaining multilevel count replied_to
@@ -1763,11 +1763,11 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
                         * uses native mode for selective broadcasts.
                         */
                        uv2_3_hdr = &bd2->header.uv2_3_hdr;
-                       uv2_3_hdr->swack_flag = 1;
+                       uv2_3_hdr->swack_flag      = 1;
                        uv2_3_hdr->base_dest_nasid =
-                                               UV_PNODE_TO_NASID(base_pnode);
-                       uv2_3_hdr->dest_subnodeid =     UV_LB_SUBNODEID;
-                       uv2_3_hdr->command =            UV_NET_ENDPOINT_INTD;
+                                                 UV_PNODE_TO_NASID(base_pnode);
+                       uv2_3_hdr->dest_subnodeid  = UV_LB_SUBNODEID;
+                       uv2_3_hdr->command         = UV_NET_ENDPOINT_INTD;
                }
        }
        for_each_present_cpu(cpu) {
@@ -1914,8 +1914,8 @@ static void __init init_per_cpu_tunables(void)
                bcp->complete_threshold         = complete_threshold;
                bcp->cong_response_us           = congested_respns_us;
                bcp->cong_reps                  = congested_reps;
-               bcp->disabled_period =          sec_2_cycles(disabled_period);
-               bcp->giveup_limit =             giveup_limit;
+               bcp->disabled_period            = sec_2_cycles(disabled_period);
+               bcp->giveup_limit               = giveup_limit;
                spin_lock_init(&bcp->queue_lock);
                spin_lock_init(&bcp->uvhub_lock);
                spin_lock_init(&bcp->disable_lock);