5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/init.h>
11 #include <linux/init.h>
12 #include <linux/list.h>
13 #include <linux/cdev.h>
14 #include <linux/poll.h>
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17 #include <linux/ioctl.h>
18 #include <linux/types.h>
19 #include <linux/module.h>
20 #include <linux/mmzone.h>
21 #include <linux/vmalloc.h>
22 #include <linux/spinlock.h>
23 #include <linux/wait.h>
/* The single global device instance: this module registers exactly one
 * char device (minor 0, see segdev_mod_init/segdev_get below). */
27 static struct segdev segdev;
/*
 * segdev_create_segment - allocate and publish the shared segment on @dev.
 * @dev:      device that will own the segment
 * @segsize:  requested segment size in bytes
 * @reserved: presumably guards the SEGDEV_RESERVED set_bit() below — the
 *            conditional itself is not visible in this view, confirm
 *            against the full source.
 *
 * The segment is vmalloc()ed (so arbitrarily large sizes are acceptable),
 * zeroed, and then SEGDEV_READY is set so users may attach.  Serialized
 * against destroy via dev->mutex; mutex_lock_interruptible() may fail with
 * a negative errno (error/return paths are partially elided here).
 * NOTE(review): interior lines are missing from this view (e.g. the
 * vmalloc failure check) — do not assume they are absent in the real file.
 */
29 int segdev_create_segment(struct segdev *dev, u64 segsize, char reserved)
32 int ret = mutex_lock_interruptible(&dev->mutex);
40 /* vmalloc can handle large sizes */
42 XSEGLOG("creating segment of size %llu\n", segsize);
43 segment = vmalloc(segsize);
47 dev->segsize = segsize;
48 dev->segment = segment;
49 memset(dev->segment, 0, segsize);
/* Publish: readers test SEGDEV_READY before using the mapping. */
50 set_bit(SEGDEV_READY, &dev->flags);
52 set_bit(SEGDEV_RESERVED, &dev->flags);
56 mutex_unlock(&dev->mutex);
61 EXPORT_SYMBOL(segdev_create_segment);
/*
 * segdev_destroy_segment - tear down @dev's segment.
 *
 * Clears SEGDEV_READY to tell kernel users to abort, then sleeps on
 * dev->wq until usercount drops to <= 1 (i.e. only the caller remains).
 * Reserved segments (SEGDEV_RESERVED) take an early-out path whose body
 * is elided in this view.  Returns 0 or a negative errno from the
 * interruptible lock/wait.
 * NOTE(review): SEGDEV_READY is set again after mutex_unlock() — this
 * looks like an error-path re-enable (wait interrupted), but the
 * surrounding control flow is elided; confirm before relying on it.
 */
63 int segdev_destroy_segment(struct segdev *dev)
65 int ret = mutex_lock_interruptible(&dev->mutex);
70 * The segment trully dies when everyone in userspace has unmapped it.
71 * However, the kernel mapping is immediately destroyed.
72 * Kernel users are notified to abort via switching of SEGDEV_READY.
73 * The mapping deallocation is performed when all kernel users
74 * have stopped using the segment as reported by usercount.
82 if (test_bit(SEGDEV_RESERVED, &dev->flags))
85 clear_bit(SEGDEV_READY, &dev->flags);
/* <= 1: the destroyer itself may hold a reference. */
86 ret = wait_event_interruptible(dev->wq, atomic_read(&dev->usercount) <= 1);
96 mutex_unlock(&dev->mutex);
97 set_bit(SEGDEV_READY, &dev->flags);
102 EXPORT_SYMBOL(segdev_destroy_segment);
/*
 * segdev_get - acquire a reference to the device for @minor.
 *
 * Bumps usercount first, then checks SEGDEV_READY; if the segment is not
 * ready the reference is dropped and ERR_PTR(-EBUSY) is returned.  The
 * default return is ERR_PTR(-ENODEV) (presumably for an invalid minor —
 * the minor-validation lines are elided here).  Callers must balance with
 * segdev_put().
 */
104 struct segdev *segdev_get(int minor)
106 struct segdev *dev = ERR_PTR(-ENODEV);
/* inc-before-test closes the race with destroy's usercount wait */
111 atomic_inc(&dev->usercount);
112 if (!test_bit(SEGDEV_READY, &dev->flags))
118 atomic_dec(&dev->usercount);
119 dev = ERR_PTR(-EBUSY);
123 EXPORT_SYMBOL(segdev_get);
/*
 * segdev_put - release a reference taken with segdev_get().
 * Presumably also wakes dev->wq so a pending destroy can proceed — the
 * wake_up call is elided in this view; confirm against the full source.
 */
125 void segdev_put(struct segdev *dev)
127 atomic_dec(&dev->usercount);
129 /* ain't all this too heavy ? */
132 EXPORT_SYMBOL(segdev_put);
134 /* ********************* */
135 /* ** File Operations ** */
136 /* ********************* */
/*
 * open(): allocate a per-open segdev_file and stash it in private_data.
 * NOTE(review): the kmalloc NULL-check and minor-number initialization
 * are elided in this view — assume they exist in the full source.
 */
142 static int segdev_open(struct inode *inode, struct file *file)
144 struct segdev_file *vf = kmalloc(sizeof(struct segdev_file), GFP_KERNEL);
148 file->private_data = vf;
/*
 * release(): tear down the per-open state created by segdev_open().
 * Body is elided in this view (presumably kfree(vf) and return 0).
 */
152 static int segdev_release(struct inode *inode, struct file *file)
154 struct segdev_file *vf = file->private_data;
/*
 * ioctl() dispatch:
 *   SEGDEV_IOC_CREATESEG  - create a segment of @arg bytes (non-reserved)
 *   SEGDEV_IOC_DESTROYSEG - destroy the global device's segment
 *   SEGDEV_IOC_SEGSIZE    - query segment size (return path elided here)
 * CREATESEG/SEGSIZE first resolve @dev (presumably via segdev_get — the
 * lookup line is elided) and propagate PTR_ERR on failure.
 */
159 static long segdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
168 case SEGDEV_IOC_CREATESEG:
170 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
174 ret = segdev_create_segment(dev, (u64)arg, 0);
178 case SEGDEV_IOC_DESTROYSEG:
180 ret = segdev_destroy_segment(&segdev);
184 case SEGDEV_IOC_SEGSIZE:
187 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
/*
 * read(): only the signature is visible in this view — the body is
 * elided (possibly a stub). Confirm against the full source.
 */
206 static ssize_t segdev_read(struct file *file, char __user *buf,
207 size_t count, loff_t *f_pos)
/*
 * write(): userspace signals a port by writing exactly one uint32_t.
 * The value is copied in and forwarded to dev->callback(dev, portno);
 * any other write size is rejected (rejection path elided here).
 * buffer_index is reset afterwards — its role is not visible in this
 * view; confirm against the full source.
 */
212 static ssize_t segdev_write(struct file *file, const char __user *buf,
213 size_t count, loff_t *f_pos)
215 struct segdev_file *vf = file->private_data;
216 struct segdev *dev = segdev_get(vf->minor);
222 if (count != sizeof(uint32_t))
225 ret = copy_from_user(&portno, buf, sizeof(uint32_t));
/* copy_from_user returns bytes NOT copied; require a full copy */
229 if((count - ret) != sizeof(uint32_t))
234 dev->callback(dev, portno);
238 dev->buffer_index = 0;
/*
 * mmap(): map the vmalloc()ed segment into userspace.
 *
 * Constraints enforced: no page offset, mapping no larger than the
 * segment, and MAP_SHARED only (rejection paths partially elided).
 * Because vmalloc memory is not physically contiguous, each page is
 * translated with vmalloc_to_pfn() and mapped individually via
 * remap_pfn_range().  On a partial failure we bail and rely on the mmap
 * syscall to unwind the already-mapped range.
 * NOTE(review): ptr presumably starts at dev->segment — its
 * initialization is elided in this view.
 */
244 static int segdev_mmap(struct file *file, struct vm_area_struct *vma)
246 struct segdev_file *vf = file->private_data;
248 size_t size = vma->vm_end - vma->vm_start;
249 unsigned long start = vma->vm_start, end = start + size;
253 dev = segdev_get(vf->minor);
263 /* do not allow offset mappings, for now */
264 if (vma->vm_pgoff || size > dev->segsize)
267 /* allow only shared, read-write mappings */
268 if (!(vma->vm_flags & VM_SHARED))
271 /* the segment is vmalloc() so we have to iterate through
272 * all pages and laboriously map them one by one. */
273 for (; start < end; start += PAGE_SIZE, ptr += PAGE_SIZE) {
274 ret = remap_pfn_range(vma, start, vmalloc_to_pfn(ptr),
275 PAGE_SIZE, vma->vm_page_prot);
277 goto out_put; /* mmap syscall should clean up, right? */
/* File operations table; .open/.read/.mmap initializers are elided in
 * this view but the handlers are defined above. */
288 static struct file_operations segdev_ops =
290 .owner = THIS_MODULE,
292 .release = segdev_release,
294 .write = segdev_write,
296 .unlocked_ioctl = segdev_ioctl,
300 /* *************************** */
301 /* ** Module Initialization ** */
302 /* *************************** */
/*
 * segdev_init - one-time initialization of a segdev instance: refcount,
 * wait queue, cdev, mutex, spinlock.  SEGDEV_READY is set even though no
 * segment exists yet — segment creation toggles it again; confirm the
 * intended flag protocol against the full source.  @minor's use is
 * elided in this view.
 */
304 static void segdev_init(struct segdev *dev, int minor)
310 atomic_set(&dev->usercount, 0);
311 init_waitqueue_head(&dev->wq);
312 cdev_init(&dev->cdev, &segdev_ops);
313 mutex_init(&dev->mutex);
314 spin_lock_init(&dev->lock);
315 dev->cdev.owner = THIS_MODULE;
316 set_bit(SEGDEV_READY, &dev->flags);
/*
 * Module entry: reserve the static (SEGDEV_MAJOR, 0) dev number,
 * initialize the single global segdev, and add its cdev.  The
 * register_chrdev_region failure check is elided; the trailing
 * unregister_chrdev_region is the cdev_add error-unwind path.
 */
319 int __init segdev_mod_init(void)
322 dev_t dev_no = MKDEV(SEGDEV_MAJOR, 0);
323 ret = register_chrdev_region(dev_no, 1, "segdev");
327 segdev_init(&segdev, 0);
328 ret = cdev_add(&segdev.cdev, dev_no, 1);
335 unregister_chrdev_region(dev_no, 1);
/*
 * Module exit: destroy any live segment (waits for kernel users), then
 * remove the cdev and release the reserved device-number region —
 * reverse order of segdev_mod_init().
 */
340 void __exit segdev_mod_exit(void)
342 dev_t dev_no = MKDEV(SEGDEV_MAJOR, 0);
343 segdev_destroy_segment(&segdev);
344 cdev_del(&segdev.cdev);
345 unregister_chrdev_region(dev_no, 1);
/* Standard module registration and metadata. */
348 module_init(segdev_mod_init);
349 module_exit(segdev_mod_exit);
351 MODULE_DESCRIPTION("segdev");
352 MODULE_AUTHOR("XSEG");
353 MODULE_LICENSE("GPL");