5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/init.h>
11 #include <linux/init.h>
12 #include <linux/list.h>
13 #include <linux/cdev.h>
14 #include <linux/poll.h>
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17 #include <linux/ioctl.h>
18 #include <linux/types.h>
19 #include <linux/module.h>
20 #include <linux/mmzone.h>
21 #include <linux/vmalloc.h>
22 #include <linux/spinlock.h>
23 #include <linux/wait.h>
/* The single device instance for this module (minor 0). */
27 static struct xsegdev xsegdev;
/*
 * xsegdev_create_segment - allocate and publish the shared segment.
 * @dev:      device to attach the segment to
 * @segsize:  requested segment size in bytes
 * @reserved: nonzero marks the segment reserved (XSEGDEV_RESERVED)
 *
 * Returns 0 on success or a negative errno (e.g. -EINTR if the mutex
 * wait was interrupted).  Serialized against destroy via dev->mutex.
 */
29 int xsegdev_create_segment(struct xsegdev *dev, u64 segsize, char reserved)
/* Interruptible lock: a signal aborts segment creation cleanly. */
32 int ret = mutex_lock_interruptible(&dev->mutex);
40 /* vmalloc can handle large sizes */
42 segment = vmalloc(segsize);
/* Publish size/pointer, zero the memory, then flip READY so
 * xsegdev_get() callers start seeing the segment as usable. */
46 dev->segsize = segsize;
47 dev->segment = segment;
48 memset(dev->segment, 0, segsize);
49 set_bit(XSEGDEV_READY, &dev->flags);
/* NOTE(review): presumably guarded by `if (reserved)` on a line not
 * shown here -- confirm against the full source. */
51 set_bit(XSEGDEV_RESERVED, &dev->flags);
55 mutex_unlock(&dev->mutex);
60 EXPORT_SYMBOL(xsegdev_create_segment);
/*
 * xsegdev_destroy_segment - tear down the kernel mapping of the segment.
 * @dev: device whose segment is being destroyed
 *
 * Returns 0 on success or a negative errno.  Refuses to destroy a
 * reserved segment.  Kernel users are told to abort by clearing
 * XSEGDEV_READY, then we wait until usercount drops to our own
 * reference before freeing.
 */
62 int xsegdev_destroy_segment(struct xsegdev *dev)
64 int ret = mutex_lock_interruptible(&dev->mutex);
/*
 * The segment truly dies when everyone in userspace has unmapped it.
 * However, the kernel mapping is immediately destroyed.
 * Kernel users are notified to abort via switching of XSEGDEV_READY.
 * The mapping deallocation is performed when all kernel users
 * have stopped using the segment as reported by usercount.
 */
/* A reserved segment must not be destroyed. */
81 if (test_bit(XSEGDEV_RESERVED, &dev->flags))
/* Drop READY so xsegdev_get() starts failing with -EBUSY, then wait
 * for every other kernel user to release its reference (<= 1 keeps
 * our own caller's reference out of the count). */
84 clear_bit(XSEGDEV_READY, &dev->flags);
85 ret = wait_event_interruptible(dev->wq, atomic_read(&dev->usercount) <= 1);
95 mutex_unlock(&dev->mutex);
/* NOTE(review): READY is re-set after unlock -- this looks like the
 * abort/error path (wait interrupted), restoring the segment to a
 * usable state; confirm the goto structure in the elided lines. */
96 set_bit(XSEGDEV_READY, &dev->flags);
101 EXPORT_SYMBOL(xsegdev_destroy_segment);
/*
 * xsegdev_get - take a reference on the device for the given minor.
 * @minor: device minor number
 *
 * Returns the device pointer, or ERR_PTR(-ENODEV) for an invalid
 * minor, or ERR_PTR(-EBUSY) if the segment is not READY.  On success
 * the caller must balance with xsegdev_put().
 */
103 struct xsegdev *xsegdev_get(int minor)
105 struct xsegdev *dev = ERR_PTR(-ENODEV);
/* Take the reference first, then re-check READY: this closes the race
 * with destroy, which waits on usercount after clearing READY. */
110 atomic_inc(&dev->usercount);
111 if (!test_bit(XSEGDEV_READY, &dev->flags))
/* Not ready: undo the reference and report busy. */
117 atomic_dec(&dev->usercount);
118 dev = ERR_PTR(-EBUSY);
122 EXPORT_SYMBOL(xsegdev_get);
/*
 * xsegdev_put - drop a reference taken by xsegdev_get().
 * @dev: device to release
 *
 * Presumably also wakes dev->wq so a destroyer waiting on usercount
 * can proceed (wake-up call is in the elided lines -- confirm).
 */
124 void xsegdev_put(struct xsegdev *dev)
126 atomic_dec(&dev->usercount);
128 /* ain't all this too heavy ? */
131 EXPORT_SYMBOL(xsegdev_put);
133 /* ********************* */
134 /* ** File Operations ** */
135 /* ********************* */
/* Per-open-file state stored in file->private_data; fields are in the
 * elided lines (at least a `minor`, used by write/mmap below). */
137 struct xsegdev_file {
/*
 * open(): allocate per-file state and attach it to the struct file.
 * NOTE(review): the kmalloc NULL-check is not visible in this excerpt;
 * confirm it exists before the private_data assignment.
 */
141 static int xsegdev_open(struct inode *inode, struct file *file)
143 struct xsegdev_file *vf = kmalloc(sizeof(struct xsegdev_file), GFP_KERNEL);
147 file->private_data = vf;
/* release(): free the per-file state allocated in open()
 * (the kfree itself is in the elided lines). */
151 static int xsegdev_release(struct inode *inode, struct file *file)
153 struct xsegdev_file *vf = file->private_data;
/*
 * ioctl dispatcher: create/destroy the segment and query its size.
 * Each case takes a reference on minor 0 for the duration of the call.
 */
158 static long xsegdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
167 case XSEGDEV_IOC_CREATESEG:
168 dev = xsegdev_get(0);
169 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
/* arg carries the requested segment size; reserved flag forced to 0. */
173 ret = xsegdev_create_segment(dev, (u64)arg, 0);
177 case XSEGDEV_IOC_DESTROYSEG:
178 dev = xsegdev_get(0);
/* NOTE(review): unlike the other cases, `dev` is never checked with
 * IS_ERR() here, and destroy is called on the global &xsegdev rather
 * than on `dev`.  If xsegdev_get() failed, the reference accounting
 * (and any later xsegdev_put(dev)) is wrong -- this path should
 * mirror CREATESEG: check IS_ERR(dev), then destroy `dev`. */
179 ret = xsegdev_destroy_segment(&xsegdev);
183 case XSEGDEV_IOC_SEGSIZE:
184 dev = xsegdev_get(0);
186 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
/* read(): body not visible in this excerpt -- presumably a stub. */
205 static ssize_t xsegdev_read(struct file *file, char __user *buf,
206 size_t count, loff_t *f_pos)
/*
 * write(): used as a doorbell -- invokes the device's registered
 * callback rather than copying user data.
 * NOTE(review): no NULL-check on dev->callback is visible here;
 * confirm the elided lines guard it before the call.
 */
211 static ssize_t xsegdev_write(struct file *file, const char __user *buf,
212 size_t count, loff_t *f_pos)
214 struct xsegdev_file *vf = file->private_data;
215 struct xsegdev *dev = xsegdev_get(vf->minor);
222 ret = dev->callback(dev->callarg);
/*
 * mmap(): map the vmalloc'd segment into userspace.
 * Rejects offset mappings, mappings larger than the segment, and
 * non-shared mappings; then maps the segment page by page.
 */
229 static int xsegdev_mmap(struct file *file, struct vm_area_struct *vma)
231 struct xsegdev_file *vf = file->private_data;
233 size_t size = vma->vm_end - vma->vm_start;
234 unsigned long start = vma->vm_start, end = start + size;
238 dev = xsegdev_get(vf->minor);
248 /* do not allow offset mappings, for now */
249 if (vma->vm_pgoff || size > dev->segsize)
252 /* allow only shared, read-write mappings */
253 if (!(vma->vm_flags & VM_SHARED))
256 /* the segment is vmalloc() so we have to iterate through
257  * all pages and laboriously map them one by one. */
/* vmalloc memory is virtually, not physically, contiguous, so each
 * page's pfn must be resolved individually via vmalloc_to_pfn(). */
258 for (; start < end; start += PAGE_SIZE, ptr += PAGE_SIZE) {
259 ret = remap_pfn_range(vma, start, vmalloc_to_pfn(ptr),
260 PAGE_SIZE, vma->vm_page_prot);
262 goto out_put; /* mmap syscall should clean up, right? */
/* File operations table wired into the cdev in xsegdev_init().
 * NOTE(review): could be declared `const` per modern kernel
 * convention -- left unchanged here. */
273 static struct file_operations xsegdev_ops =
275 .owner = THIS_MODULE,
276 .open = xsegdev_open,
277 .release = xsegdev_release,
278 .read = xsegdev_read,
279 .write = xsegdev_write,
280 .mmap = xsegdev_mmap,
281 .unlocked_ioctl = xsegdev_ioctl,
285 /* *************************** */
286 /* ** Module Initialization ** */
287 /* *************************** */
/*
 * Initialize one xsegdev instance: refcount, waitqueue, cdev, locks.
 * Starts with READY set (no segment yet; creation flips state later).
 */
289 static void xsegdev_init(struct xsegdev *dev, int minor)
295 atomic_set(&dev->usercount, 0);
296 init_waitqueue_head(&dev->wq);
297 cdev_init(&dev->cdev, &xsegdev_ops);
298 mutex_init(&dev->mutex);
299 spin_lock_init(&dev->lock);
300 dev->cdev.owner = THIS_MODULE;
301 set_bit(XSEGDEV_READY, &dev->flags);
/*
 * Module entry: reserve the (static-major, minor 0) char region,
 * initialize the single device, and register its cdev.  On cdev_add
 * failure the region is unregistered before returning the error.
 */
304 int __init xsegdev_mod_init(void)
307 dev_t dev_no = MKDEV(XSEGDEV_MAJOR, 0);
308 ret = register_chrdev_region(dev_no, 1, "xsegdev");
312 xsegdev_init(&xsegdev, 0);
313 ret = cdev_add(&xsegdev.cdev, dev_no, 1);
/* Error path: undo the region reservation. */
320 unregister_chrdev_region(dev_no, 1);
/*
 * Module exit: tear down the segment (if any), remove the cdev and
 * release the char region.
 * NOTE(review): xsegdev_destroy_segment()'s return value is ignored;
 * an interrupted wait could leave the segment allocated at unload --
 * confirm against the elided error handling.
 */
325 void __exit xsegdev_mod_exit(void)
327 dev_t dev_no = MKDEV(XSEGDEV_MAJOR, 0);
328 xsegdev_destroy_segment(&xsegdev);
329 cdev_del(&xsegdev.cdev);
330 unregister_chrdev_region(dev_no, 1);
/* Standard module registration boilerplate. */
333 module_init(xsegdev_mod_init);
334 module_exit(xsegdev_mod_exit);
336 MODULE_DESCRIPTION("xsegdev");
337 MODULE_AUTHOR("XSEG");
338 MODULE_LICENSE("GPL");