5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/init.h>
11 #include <linux/init.h>
12 #include <linux/list.h>
13 #include <linux/cdev.h>
14 #include <linux/poll.h>
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17 #include <linux/ioctl.h>
18 #include <linux/types.h>
19 #include <linux/module.h>
20 #include <linux/mmzone.h>
21 #include <linux/vmalloc.h>
22 #include <linux/spinlock.h>
23 #include <linux/wait.h>
/* The single global device instance -- this driver registers exactly one
 * char device (SEGDEV_MAJOR, minor 0; see segdev_mod_init). */
27 static struct segdev segdev;
/*
 * segdev_create_segment - allocate and zero a memory segment for @dev.
 * @dev:      target device
 * @segsize:  size of the segment in bytes
 * @reserved: presumably non-zero marks the segment SEGDEV_RESERVED --
 *            the guarding condition is in lines not visible in this
 *            chunk; TODO confirm against the full source.
 *
 * Takes dev->mutex interruptibly, so a negative errno (e.g. -EINTR)
 * return is possible.  NOTE(review): several original lines (error
 * checks, braces, return paths) are missing from this chunk and could
 * not be reviewed.
 */
29 int segdev_create_segment(struct segdev *dev, u64 segsize, char reserved)
32 int ret = mutex_lock_interruptible(&dev->mutex);
40 /* vmalloc can handle large sizes */
42 segment = vmalloc(segsize);
/* NOTE(review): the vmalloc() failure check is not visible here --
 * verify it exists in the missing lines before the stores below. */
46 dev->segsize = segsize;
47 dev->segment = segment;
48 memset(dev->segment, 0, segsize);
/* publish the segment: READY gates access in segdev_get() */
49 set_bit(SEGDEV_READY, &dev->flags);
51 set_bit(SEGDEV_RESERVED, &dev->flags);
55 mutex_unlock(&dev->mutex);
60 EXPORT_SYMBOL(segdev_create_segment);
/*
 * segdev_destroy_segment - tear down the segment attached to @dev.
 * @dev: target device
 *
 * Returns 0 or a negative errno (both the mutex acquisition and the
 * drain wait are interruptible).  NOTE(review): many original lines are
 * missing from this chunk; the reserved-segment branch body and the
 * actual vfree()/unmap path could not be reviewed here.
 */
62 int segdev_destroy_segment(struct segdev *dev)
64 int ret = mutex_lock_interruptible(&dev->mutex);
69 * The segment truly dies when everyone in userspace has unmapped it.
70 * However, the kernel mapping is immediately destroyed.
71 * Kernel users are notified to abort via switching of SEGDEV_READY.
72 * The mapping deallocation is performed when all kernel users
73 * have stopped using the segment as reported by usercount.
/* reserved segments are presumably refused -- the branch body is in
 * lines not visible in this chunk; confirm. */
81 if (test_bit(SEGDEV_RESERVED, &dev->flags))
/* drop READY so kernel users abort, then wait for them to drain;
 * usercount <= 1 allows for the caller's own reference */
84 clear_bit(SEGDEV_READY, &dev->flags);
85 ret = wait_event_interruptible(dev->wq, atomic_read(&dev->usercount) <= 1);
95 mutex_unlock(&dev->mutex);
/* NOTE(review): READY is re-set after the unlock -- presumably part of
 * an error/abort path in the missing lines; verify the ordering against
 * the full source. */
96 set_bit(SEGDEV_READY, &dev->flags);
101 EXPORT_SYMBOL(segdev_destroy_segment);
/*
 * segdev_get - look up the device for @minor and take a usercount
 * reference.
 * @minor: minor number to look up
 *
 * Returns the device with usercount elevated, ERR_PTR(-ENODEV) when the
 * minor is unknown, or ERR_PTR(-EBUSY) when the device is not READY (in
 * which case the reference is dropped again).  NOTE(review): the actual
 * lookup and the branch structure are in lines missing from this chunk.
 */
103 struct segdev *segdev_get(int minor)
105 struct segdev *dev = ERR_PTR(-ENODEV);
110 atomic_inc(&dev->usercount);
111 if (!test_bit(SEGDEV_READY, &dev->flags))
/* not ready: undo the reference and report busy */
117 atomic_dec(&dev->usercount);
118 dev = ERR_PTR(-EBUSY);
122 EXPORT_SYMBOL(segdev_get);
/*
 * segdev_put - drop a usercount reference taken with segdev_get().
 * @dev: device to release
 *
 * NOTE(review): segdev_destroy_segment() waits on dev->wq for usercount
 * to drain, so a wake_up presumably happens in the lines missing from
 * this chunk -- confirm.
 */
124 void segdev_put(struct segdev *dev)
126 atomic_dec(&dev->usercount);
128 /* ain't all this too heavy ? */
131 EXPORT_SYMBOL(segdev_put);
133 /* ********************* */
134 /* ** File Operations ** */
135 /* ********************* */
/*
 * segdev_open - allocate per-open state and stash it in
 * file->private_data.
 * NOTE(review): the kmalloc() failure check, the initialization of
 * vf (e.g. vf->minor used by write/mmap), and the return are in lines
 * not visible in this chunk.
 */
141 static int segdev_open(struct inode *inode, struct file *file)
143 struct segdev_file *vf = kmalloc(sizeof(struct segdev_file), GFP_KERNEL);
147 file->private_data = vf;
/*
 * segdev_release - tear down the per-open state created by
 * segdev_open().  NOTE(review): the kfree()/cleanup and return are in
 * lines not visible in this chunk.
 */
151 static int segdev_release(struct inode *inode, struct file *file)
153 struct segdev_file *vf = file->private_data;
/*
 * segdev_ioctl - dispatch segdev control commands.
 * For SEGDEV_IOC_CREATESEG, @arg carries the requested segment size.
 * NOTE(review): most of the switch body (the dev lookup feeding the
 * IS_ERR checks, put/return paths, SEGSIZE copy-out, default case) is
 * in lines missing from this chunk.
 */
158 static long segdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
167 case SEGDEV_IOC_CREATESEG:
169 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
/* create a non-reserved segment of @arg bytes */
173 ret = segdev_create_segment(dev, (u64)arg, 0);
177 case SEGDEV_IOC_DESTROYSEG:
/* NOTE(review): operates on the global &segdev directly, unlike
 * CREATESEG which goes through a looked-up dev -- confirm this
 * asymmetry is intended. */
179 ret = segdev_destroy_segment(&segdev);
183 case SEGDEV_IOC_SEGSIZE:
186 ret = IS_ERR(dev) ? PTR_ERR(dev) : 0;
/* segdev_read - read() handler; only the signature is visible in this
 * chunk, the body (original lines 207+) is missing. */
205 static ssize_t segdev_read(struct file *file, char __user *buf,
206 size_t count, loff_t *f_pos)
/*
 * segdev_write - copy up to SEGDEV_BUFSIZE bytes from userspace into
 * the device's staging buffer.
 * NOTE(review): the IS_ERR(dev) handling, any locking around
 * dev->buffer, the consumer of buffer_index, the segdev_put(), and the
 * return value are all in lines missing from this chunk.
 */
211 static ssize_t segdev_write(struct file *file, const char __user *buf,
212 size_t count, loff_t *f_pos)
214 struct segdev_file *vf = file->private_data;
215 struct segdev *dev = segdev_get(vf->minor);
/* clamp the request to the staging buffer size */
220 if (count > SEGDEV_BUFSIZE)
221 count = SEGDEV_BUFSIZE;
/* copy_from_user() returns the number of bytes NOT copied */
223 ret = copy_from_user(dev->buffer, buf, count);
/* record how many bytes actually landed in the buffer */
227 dev->buffer_index = count - ret;
/* NOTE(review): the path that resets the index is not fully visible */
235 dev->buffer_index = 0;
/*
 * segdev_mmap - map the vmalloc()ed segment into userspace.
 * Rejects offset mappings, mappings larger than the segment, and
 * non-shared mappings; maps page by page because vmalloc memory is not
 * physically contiguous.
 * NOTE(review): the error-return statements between the visible checks,
 * ptr's initialization (presumably dev->segment), and the out_put label
 * are in lines missing from this chunk.
 */
241 static int segdev_mmap(struct file *file, struct vm_area_struct *vma)
243 struct segdev_file *vf = file->private_data;
245 size_t size = vma->vm_end - vma->vm_start;
246 unsigned long start = vma->vm_start, end = start + size;
/* take a usercount reference for the duration of the mapping setup */
250 dev = segdev_get(vf->minor);
260 /* do not allow offset mappings, for now */
261 if (vma->vm_pgoff || size > dev->segsize)
264 /* allow only shared, read-write mappings */
265 if (!(vma->vm_flags & VM_SHARED))
268 /* the segment is vmalloc() so we have to iterate through
269 * all pages and laboriously map them one by one. */
270 for (; start < end; start += PAGE_SIZE, ptr += PAGE_SIZE) {
271 ret = remap_pfn_range(vma, start, vmalloc_to_pfn(ptr),
272 PAGE_SIZE, vma->vm_page_prot);
274 goto out_put; /* mmap syscall should clean up, right? */
/* file_operations for the segdev char device; the .open, .read and
 * .mmap entries are on lines not visible in this chunk. */
285 static struct file_operations segdev_ops =
287 .owner = THIS_MODULE,
289 .release = segdev_release,
291 .write = segdev_write,
293 .unlocked_ioctl = segdev_ioctl,
297 /* *************************** */
298 /* ** Module Initialization ** */
299 /* *************************** */
/*
 * segdev_init - initialize a segdev structure: refcount, waitqueue,
 * cdev (bound to segdev_ops), mutex and spinlock, then mark it READY.
 * @minor: NOTE(review): none of the visible lines use @minor --
 * presumably stored in one of the missing lines; confirm.
 */
301 static void segdev_init(struct segdev *dev, int minor)
307 atomic_set(&dev->usercount, 0);
308 init_waitqueue_head(&dev->wq);
309 cdev_init(&dev->cdev, &segdev_ops);
310 mutex_init(&dev->mutex);
311 spin_lock_init(&dev->lock);
312 dev->cdev.owner = THIS_MODULE;
313 set_bit(SEGDEV_READY, &dev->flags);
/*
 * segdev_mod_init - module entry: reserve the char-dev region
 * (SEGDEV_MAJOR, minor 0), initialize the single global device and add
 * its cdev.  NOTE(review): the success return and the check between
 * cdev_add and the unregister below are in lines missing from this
 * chunk.
 */
316 int __init segdev_mod_init(void)
319 dev_t dev_no = MKDEV(SEGDEV_MAJOR, 0);
320 ret = register_chrdev_region(dev_no, 1, "segdev");
324 segdev_init(&segdev, 0);
325 ret = cdev_add(&segdev.cdev, dev_no, 1);
/* error path: give the reserved region back */
332 unregister_chrdev_region(dev_no, 1);
/*
 * segdev_mod_exit - module exit: destroy any live segment, remove the
 * cdev and release the char-dev region, mirroring segdev_mod_init().
 */
337 void __exit segdev_mod_exit(void)
339 dev_t dev_no = MKDEV(SEGDEV_MAJOR, 0);
340 segdev_destroy_segment(&segdev);
341 cdev_del(&segdev.cdev);
342 unregister_chrdev_region(dev_no, 1);
/* Module registration and metadata. */
345 module_init(segdev_mod_init);
346 module_exit(segdev_mod_exit);
348 MODULE_DESCRIPTION("segdev");
349 MODULE_AUTHOR("XSEG");
350 MODULE_LICENSE("GPL");