/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;    /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned int minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 __iomem *ptr;        /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data
         */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful;
         * note that this must be set with req_list_lock, and if you set <0,
         * it is your responsibility to also free up all of the other objects
         * in this struct (like input_buffer_list, active_buf.parser_ctx)
         * before releasing the lock
         */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* Notifier callbacks registered by the visorbus driver (via
 * visorchipset_register_busdev()); invoked when bus/device controlvm
 * messages arrive.
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
                                               int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
        .dev.release = visorchipset_dev_release,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

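/* Copy a controlvm message payload into a freshly allocated parser context.
 * <addr> and <bytes> locate the payload; 'local' selects a __va() copy
 * instead of an ioremap.  Under memory pressure *retry is set so the caller
 * can re-attempt later from controlvm_periodic_work().
 */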
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
        int allocbytes = sizeof(struct parser_context) + bytes;
        struct parser_context *rc = NULL;
        struct parser_context *ctx = NULL;

        if (retry)
                *retry = false;

        /*
         * alloc an extra byte to ensure the payload is
         * '\0'-terminated
         */
        allocbytes++;
        if ((controlvm_payload_bytes_buffered + bytes)
            > MAX_CONTROLVM_PAYLOAD_BYTES) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
        if (!ctx) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }

        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        ctx->curr = NULL;
        ctx->bytes_remaining = 0;
        ctx->byte_stream = false;
        if (local) {
                void *p;

                if (addr > virt_to_phys(high_memory - 1)) {
                        rc = NULL;
                        goto cleanup;
                }
                p = __va((unsigned long)addr);
                memcpy(ctx->data, p, bytes);
        } else {
                void __iomem *mapping;

                if (!request_mem_region(addr, bytes, "visorchipset")) {
                        rc = NULL;
                        goto cleanup;
                }

                mapping = ioremap_cache(addr, bytes);
                if (!mapping) {
                        release_mem_region(addr, bytes);
                        rc = NULL;
                        goto cleanup;
                }
                memcpy_fromio(ctx->data, mapping, bytes);
                iounmap(mapping);
                release_mem_region(addr, bytes);
        }

        ctx->byte_stream = true;
        rc = ctx;
cleanup:
        if (rc) {
                controlvm_payload_bytes_buffered += ctx->param_bytes;
        } else if (ctx) {
                parser_done(ctx);
                ctx = NULL;
        }
        return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

/* Identifies which string field within the controlvm parameters payload a
 * parser_param_start() call should select.
 */
enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used? */
};

static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (!ctx)
                return;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }
}

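/* Release a parser context and credit its bytes back to the
 * controlvm_payload_bytes_buffered budget.
 */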
static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

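/* Return a kmalloc()ed, '\0'-terminated copy of the string selected by the
 * last parser_param_start() call, or NULL on failure; the caller must
 * kfree() the result.
 */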
static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)value)[value_length] = '\0';
        return value;
}

static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &efi_spar_indication,
                        sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = (struct visor_busdev *)data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if ((vdev->chipset_bus_no == bus_no) &&
            (vdev->chipset_dev_no == dev_no))
                return 1;

        return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                        .bus_no = bus_no,
                        .dev_no = dev_no
                };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

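/* Returns 1 only after every event tracked in chipset_events[] has been
 * signaled.
 */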
static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;

        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;

        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

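/* Called by the visorbus driver to (un)register for bus/device
 * notifications: a NULL <notifiers> unregisters; otherwise the chipset's
 * responder callbacks and driver info are handed back through <responders>
 * and <driver_info>.
 */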
void
visorchipset_register_busdev(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_notifiers, 0,
                       sizeof(busdev_notifiers));
                visorbusregistered = 0; /* clear flag */
        } else {
                busdev_notifiers = *notifiers;
                visorbusregistered = 1; /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it).
         */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

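/* Seed an outgoing controlvm response from the request's header, marking
 * the header 'failed' when <response> carries a negative error code.
 */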
static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return;

        visorchannel_signalinsert(controlvm_channel, CONTROLVM_QUEUE_REQUEST,
                                  &outmsg);
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        visorchannel_signalinsert(controlvm_channel, CONTROLVM_QUEUE_REQUEST,
                                  &outmsg);
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        visorchannel_signalinsert(controlvm_channel, CONTROLVM_QUEUE_REQUEST,
                                  &outmsg);
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

static void
bus_responder(enum controlvm_id cmd_id,
              struct controlvm_message_header *pending_msg_hdr,
              int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visor_device *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no = p->chipset_bus_no;
        u32 dev_no = p->chipset_dev_no;

        if (!p->pending_msg_hdr)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr->id != cmd_id)
                return;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        visorchannel_signalinsert(controlvm_channel, CONTROLVM_QUEUE_REQUEST,
                                  &outmsg);
}

static void
device_responder(enum controlvm_id cmd_id,
                 struct controlvm_message_header *pending_msg_hdr,
                 int response)
{
        if (!pending_msg_hdr)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

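/* Common completion path for bus create/destroy: records the pending
 * response header (when one is expected) and dispatches the registered
 * visorbus notifier, falling back to responding directly when no notifier
 * runs.
 */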
static void
bus_epilog(struct visor_device *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        if (!bus_info) {
                /* Relying on a valid passed-in response code here; be lazy
                 * and re-use msg_hdr for this failure -- is this ok??
                 */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = bus_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        if (busdev_notifiers.bus_create) {
                                (*busdev_notifiers.bus_create)(bus_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_notifiers.bus_destroy) {
                                (*busdev_notifiers.bus_destroy)(bus_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        /* If a notifier was called above, it is responsible for calling the
         * appropriate visorchipset_busdev_responders function, which will
         * call bus_responder(); otherwise, respond directly here.  Do not
         * kfree(pmsg_hdr) on this failure path; the success path
         * ('notified') calls the responder, which kfree()s it there.
         */
        if (!notified)
                bus_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

static void
device_epilog(struct visor_device *dev_info,
              struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        notifiers = &busdev_notifiers;

        if (!dev_info) {
                /* Relying on a valid passed-in response code here; be lazy
                 * and re-use msg_hdr for this failure -- is this ok??
                 */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = dev_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create)(dev_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume)(dev_info);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause)(dev_info);
                                        notified = true;
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy)(dev_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        /* If a notifier was called above, it is responsible for calling the
         * appropriate visorchipset_busdev_responders function, which will
         * call device_responder(); otherwise, respond directly here.  Do not
         * kfree(pmsg_hdr) on this failure path; the success path
         * ('notified') calls the responder, which kfree()s it there.
         */
        if (!notified)
                device_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

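/* Handle CONTROLVM_BUS_CREATE: allocate the visor_device and its
 * visorchannel for the new bus, then respond via bus_epilog().
 */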
static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(bus_info);
                bus_info = NULL;
                goto cleanup;
        }
        bus_info->visorchannel = visorchannel;

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);

        /* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                visorchannel_set_clientpartition(bus_info->visorchannel,
                                cmd->configure_bus.guest_handle);
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        dev_info->inst = cmd->create_device.dev_inst_uuid;

        /* not sure where the best place is to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        visorchannel =
               visorchannel_create_with_lock(cmd->create_device.channel_addr,
                                             cmd->create_device.channel_bytes,
                                             GFP_KERNEL,
                                             cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(dev_info);
                dev_info = NULL;
                goto cleanup;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        device_epilog(dev_info, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS for
 * success or a negative CONTROLVM_RESP_ERROR_* code for failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
                                  struct visor_controlvm_payload_info *info)
{
        u8 __iomem *payload = NULL;
        int rc = CONTROLVM_RESP_SUCCESS;

        if (!info) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
        if ((offset == 0) || (bytes == 0)) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        payload = ioremap_cache(phys_addr + offset, bytes);
        if (!payload) {
                rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
                goto cleanup;
        }

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

cleanup:
        if (rc < 0) {
                if (payload) {
                        iounmap(payload);
                        payload = NULL;
                }
        }
        return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
        if (info->ptr) {
                iounmap(info->ptr);
                info->ptr = NULL;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
        u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
        u64 payload_offset = 0;
        u32 payload_bytes = 0;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_offset),
                              &payload_offset, sizeof(payload_offset)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_bytes),
                              &payload_bytes, sizeof(payload_bytes)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        initialize_controlvm_payload_info(phys_addr,
                                          payload_offset, payload_bytes,
                                          &controlvm_payload_info);
}

/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
        return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
        char env_selftest[20];
        char *envp[] = { env_selftest, NULL };

        sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);
        return CONTROLVM_RESP_SUCCESS;
}

/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
        return CONTROLVM_RESP_SUCCESS;
}

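/* CHIPSET_READY handler: fire the KOBJ_ONLINE uevent and either respond
 * now or, when the 'holdchipsetready' module parameter is set, stash the
 * header in g_chipset_msg_hdr so the response can be sent later (via the
 * chipsetready sysfs attribute).
 */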
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_ready();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
                controlvm_respond(msg_hdr, rc);
        if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
                /* Send CHIPSET_READY response when all modules have been loaded
                 * and disks mounted for the partition
                 */
                g_chipset_msg_hdr = *msg_hdr;
        }
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_selftest();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
        int rc = visorchipset_chipset_notready();

        if (rc != CONTROLVM_RESP_SUCCESS)
                rc = -rc;
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, rc);
}

1471 /* This is your "one-stop" shop for grabbing the next message from the
1472  * CONTROLVM_QUEUE_EVENT queue in the controlvm channel.
1473  */
1474 static bool
1475 read_controlvm_event(struct controlvm_message *msg)
1476 {
1477         if (visorchannel_signalremove(controlvm_channel,
1478                                       CONTROLVM_QUEUE_EVENT, msg)) {
1479                 /* got a message */
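		/* test messages are not meant for us; drop them */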
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}

/*
 * The general parahotplug flow works as follows.  The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable.  The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device.  The udev script then writes to the
 * parahotplug/devicedisabled (or parahotplug/deviceenabled) sysfs
 * attribute, which causes parahotplug_request_complete to be called,
 * at which point the matching CONTROLVM message is retrieved from the
 * list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * Generate a unique int to match an outstanding CONTROLVM message with a
 * udev script sysfs response
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}

/*
 * Returns the time (in jiffies) when a CONTROLVM message on the list
 * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * Create a parahotplug_request, which is basically a wrapper for a
 * CONTROLVM_MESSAGE that we can stick on a list
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}

/*
 * Free a parahotplug_request.
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

/*
 * Cause uevent to run the user level script to do the disable/enable
 * specified in (the CONTROLVM message in) the specified
 * parahotplug_request
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	    env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
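
/*
 * For example (hypothetical values): a disable request with id 7 for
 * dev_no 42 on bus 1 (device 42 >> 3 == 5, function 42 & 0x7 == 2)
 * produces a KOBJ_CHANGE uevent whose environment carries
 * SPAR_PARAHOTPLUG=1, SPAR_PARAHOTPLUG_ID=7, SPAR_PARAHOTPLUG_STATE=0,
 * SPAR_PARAHOTPLUG_BUS=1, SPAR_PARAHOTPLUG_DEVICE=5 and
 * SPAR_PARAHOTPLUG_FUNCTION=2.
 */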

/*
 * Remove any request from the list that's been on there too long and
 * respond with an error.
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}

/*
 * Called from the sysfs devicedisabled/deviceenabled store handlers,
 * which means the user script has finished the enable/disable.  Find
 * the request with the matching identifier, and respond to its
 * CONTROLVM message.
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/* Found a match.  Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -1;
}

/*
 * Enables or disables a PCI device by kicking off a udev script
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/* For enable messages, just respond with success
		 * right away.  This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all).  Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate(&inmsg->hdr,
			CONTROLVM_RESP_SUCCESS,
			inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/* For disable messages, add the request to the
		 * request list before kicking off the udev script.  It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}

/* Process a controlvm message.
 * Return result:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error.
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/* Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory.  We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/* save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
			g_devicechangestate_packet = inmsg.cmd;
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a success response */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
				-CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}

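/*
 * Ask the hypervisor, via an IO VMCALL, where the controlvm channel
 * lives; on success its physical address and size come back through the
 * params structure.
 */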
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}

static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}

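/*
 * Periodic work function: drain the response queue, pull the next
 * message off the event queue and dispatch it (retrying a previously
 * throttled message first, if one is pending), then re-arm the delayed
 * work at the current poll rate.
 */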
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;
	static u64 poll_count;

	/* make sure visorbus server is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	poll_count++;
	if (poll_count < 250)
		goto cleanup;	/* keep waiting */

	/* Check events to determine if response to CHIPSET_READY
	 * should be sent
	 */
	if (visorchipset_holdchipsetready &&
	    (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
		if (check_chipset_events() == 1) {
			controlvm_respond(&g_chipset_msg_hdr, 0);
			clear_chipset_events();
			memset(&g_chipset_msg_hdr, 0,
			       sizeof(struct controlvm_message_header));
		}
	}

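	/* drain the response queue, discarding the messages */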
	while (visorchannel_signalremove(controlvm_channel,
					 CONTROLVM_QUEUE_RESPONSE,
					 &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/* we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/* this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

cleanup:

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/* it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

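/*
 * Work function used instead of controlvm_periodic_work when booting in
 * a kdump (crash) kernel: it sends the CHIPSET_INIT message itself, then
 * replays the bus-create and device-create messages that were saved in
 * the controlvm channel so the storage device needed for the dump can be
 * brought back up.
 */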
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	/* make sure visorbus is registered for controlvm callbacks */
	if (visorchipset_visorbusregwait && !visorbusregistered)
		goto cleanup;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create bus message at the saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
	return;

cleanup:

	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;

	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);
}

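/*
 * Completion callbacks invoked once visorbus has finished carrying out
 * a CONTROLVM request: each sends the deferred response and frees the
 * saved message header.
 */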
static void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
visorchipset_device_pause_response(struct visor_device *dev_info,
				   int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

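/* The chipsetready interface accepts exactly two message types,
 * CALLHOMEDISK_MOUNTED and MODULES_LOADED, which set the corresponding
 * chipset_events flags; once both are set, the held CHIPSET_READY
 * response can go out.  Hypothetical usage, assuming the attribute is
 * exposed under the visorchipset platform device:
 *   echo MODULES_LOADED > /sys/devices/platform/visorchipset/guest/chipsetready
 */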
static ssize_t chipsetready_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	char msgtype[64];

	if (sscanf(buf, "%63s", msgtype) != 1)
		return -EINVAL;

	if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
		chipset_events[0] = 1;
		return count;
	} else if (!strcmp(msgtype, "MODULES_LOADED")) {
		chipset_events[1] = 1;
		return count;
	}
	return -EINVAL;
}

/* The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 0);
	return count;
}

/* The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}

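/*
 * Map the GP control channel into the caller's address space.  Only
 * offset 0 (VISORCHIPSET_MMAP_CONTROLCHANOFFSET) is supported: the
 * channel's physical address is read from the controlvm channel header
 * and remapped into the vma.
 */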
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read(*file_controlvm_channel,
			offsetof(struct spar_controlvm_channel_protocol,
				 gp_control_channel),
			&addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}

static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}

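/*
 * ioctl interface: two commands are recognized, one to query the guest's
 * virtual RTC time offset and one to apply an adjustment to physical
 * time; each shuttles an s64 between userspace and the matching VMCALL.
 */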
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	s64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};

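/*
 * Register the visorchipset character device, allocating a major number
 * dynamically unless one was supplied through the "major" module
 * parameter.
 */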
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		/* dynamic major device number registration required */
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}

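/*
 * ACPI add callback: locate and validate the controlvm channel, set up
 * the character device, start the periodic controlvm work (or, in a
 * kdump kernel, the crash-device setup work), register the platform
 * device, and initialize visorbus.
 */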
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int rc = 0;
	u64 addr;
	int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		return -ENODEV;

	memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		return -ENODEV;
	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
		    visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		visorchannel_destroy(controlvm_channel);
		controlvm_channel = NULL;
		return -ENODEV;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	rc = visorchipset_file_init(major_dev, &controlvm_channel);
	if (rc < 0) {
		POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);
	periodic_controlvm_workqueue =
	    create_singlethread_workqueue("visorchipset_controlvm");

	if (!periodic_controlvm_workqueue) {
		POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
				 DIAG_SEVERITY_ERR);
		rc = -ENOMEM;
		goto cleanup;
	}
	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	queue_delayed_work(periodic_controlvm_workqueue,
			   &periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	rc = platform_device_register(&visorchipset_platform_device);
	if (rc < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		goto cleanup;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	rc = visorbus_init();
cleanup:
	if (rc) {
		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
				 POSTCODE_SEVERITY_ERR);
	}
	return rc;
}

static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}

static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work(&periodic_controlvm_work);
	flush_workqueue(periodic_controlvm_workqueue);
	destroy_workqueue(periodic_controlvm_workqueue);
	periodic_controlvm_workqueue = NULL;
	destroy_controlvm_payload_info(&controlvm_payload_info);

	memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}

static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);

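/*
 * Detect whether we are running under the Unisys s-Par hypervisor by
 * checking the hypervisor CPUID leaf for the expected signature.
 */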
static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has_hypervisor)
		return 0;

	/* check the ID */
	cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
	return  (ebx == UNISYS_SPAR_ID_EBX) &&
		(ecx == UNISYS_SPAR_ID_ECX) &&
		(edx == UNISYS_SPAR_ID_EDX);
}

static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_visorbusregwait,
		 "1 to have the module wait for the visor bus to register");
module_param_named(holdchipsetready, visorchipset_holdchipsetready,
		   int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_holdchipsetready,
		 "1 to hold response to CHIPSET_READY");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
		   VERSION);
MODULE_VERSION(VERSION);