# Fill in defaults for any environment variable the caller left unset.
[ "${CHRDEV_NAME}" = "/dev/" ] && CHRDEV_NAME="/dev/segdev"
# Ask the running kernel for the segdev char-device major number.
[ -z "${CHRDEV_MAJOR}" ] && CHRDEV_MAJOR=`cat /proc/devices | grep segdev | awk '{print $1}'`
# Fall back to a fixed major (60) when segdev is not registered yet.
[ -z "${CHRDEV_MAJOR}" ] && CHRDEV_MAJOR=60
# NOTE(review): the two lines below are unified-diff residue (NR_OPS default
# bumped from 64 to 128); the +/- markers are kept byte-for-byte.
- [ -z "${NR_OPS}" ] && NR_OPS=64
+ [ -z "${NR_OPS}" ] && NR_OPS=128
# Default xseg port numbers. MPORT is the mapper port (passed to mt-mapperd
# via -p below); BPORT/VPORT presumably blocker/vlmc ports — confirm against
# the peers that consume them.
[ -z "${BPORT}" ] && BPORT=0
[ -z "${VPORT}" ] && VPORT=2
[ -z "${MPORT}" ] && MPORT=1
# Spawn the user-space mapper daemon in the background unless a process
# matching "mt-mapperd" is already running (pgrep -f check). stdout/stderr
# are redirected to a per-host log file under ${XSEG_LOGS}.
# NOTE(review): the +/- lines inside the body are unified-diff residue —
# the patch appends "-n ${NR_OPS}" to the daemon's arguments; markers are
# kept byte-for-byte, and no comment may be inserted between them because
# the preceding line ends in a "\" continuation.
function spawn_mapperdc {
pgrep -f "mt-mapperd" || \
"${XSEG_HOME}/peers/user/mt-mapperd" -t 1 -p "$MPORT" -bp "$BPORT" -mbp "$MBPORT" -g "${SPEC}"\
- &> "${XSEG_LOGS}/mapperd-${HOSTNAME}" &
+ -n ${NR_OPS} &> "${XSEG_LOGS}/mapperd-${HOSTNAME}" &
# alloc_requests "$MPORT:0" 128
}
uint64_t datalen;
xport p;
int r;
+ unsigned long flags;
+ spin_unlock_irq(&xsegbd_dev->rqlock);
for (;;) {
+ if (current_thread_info()->preempt_count || irqs_disabled()){
+ XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
+ current_thread_info()->preempt_count, irqs_disabled());
+ }
+ //XSEGLOG("Priority: %d", current_thread_info()->task->prio);
+ //XSEGLOG("Static priority: %d", current_thread_info()->task->static_prio);
+ //XSEGLOG("Normal priority: %d", current_thread_info()->task->normal_prio);
+ //XSEGLOG("Rt_priority: %u", current_thread_info()->task->rt_priority);
blkreq_idx = Noneidx;
xreq = xseg_get_request(xsegbd_dev->xseg, xsegbd_dev->src_portno,
xsegbd_dev->dst_portno, X_ALLOC);
break;
}
+
+ spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
blkreq = blk_fetch_request(rq);
- if (!blkreq)
+ if (!blkreq){
+ spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
break;
+ }
if (blkreq->cmd_type != REQ_TYPE_FS) {
//we lose xreq here
XSEGLOG("non-fs cmd_type: %u. *shrug*", blkreq->cmd_type);
__blk_end_request_all(blkreq, 0);
+ spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
continue;
}
+ spin_unlock_irqrestore(&xsegbd_dev->rqlock, flags);
+ if (current_thread_info()->preempt_count || irqs_disabled()){
+ XSEGLOG("Current thread preempt_count: %d, irqs_disabled(): %lu ",
+ current_thread_info()->preempt_count, irqs_disabled());
+ }
datalen = blk_rq_bytes(blkreq);
r = xseg_prep_request(xsegbd_dev->xseg, xreq,
xsegbd_dev->targetlen, datalen);
if (r < 0) {
XSEGLOG("couldn't prep request");
- __blk_end_request_err(blkreq, r);
+ blk_end_request_err(blkreq, r);
BUG_ON(1);
break;
}
r = -ENOMEM;
if (xreq->bufferlen - xsegbd_dev->targetlen < datalen){
XSEGLOG("malformed req buffers");
- __blk_end_request_err(blkreq, r);
+ blk_end_request_err(blkreq, r);
BUG_ON(1);
break;
}
if (p == NoPort) {
XSEGLOG("coundn't submit req");
BUG_ON(1);
- __blk_end_request_err(blkreq, r);
+ blk_end_request_err(blkreq, r);
break;
}
WARN_ON(xseg_signal(xsegbd_dev->xsegbd->xseg, p) < 0);
if (blkreq_idx != Noneidx)
BUG_ON(xq_append_head(&xsegbd_dev->blk_queue_pending,
blkreq_idx, xsegbd_dev->src_portno) == Noneidx);
+ spin_lock_irq(&xsegbd_dev->rqlock);
}
int update_dev_sectors_from_request( struct xsegbd_device *xsegbd_dev,
BUG_ON(1);
}
}
-
if (xsegbd_dev) {
spin_lock_irqsave(&xsegbd_dev->rqlock, flags);
xseg_request_fn(xsegbd_dev->blk_queue);
/* Log severities — presumably Error, Warning, Info, Debug; confirm. */
enum log_level { E = 0, W = 1, I = 2, D = 3};
/* Overridable hook that initializes a log context for a named peer. */
extern int (*init_logctx)(struct log_ctx *lc, char *peer_name, enum log_level log_level, char *logfile);
void __xseg_log2(struct log_ctx *lc, enum log_level level, char *fmt, ...);
+
+/* New in this patch: emit a stack trace through the logging facility. */
+void xseg_printtrace(void);
#endif
return;
}
+/* Kernel build of xseg_printtrace(): delegate to the kernel's
+ * dump_stack() helper. */
+void xseg_printtrace(void)
+{
+ dump_stack();
+}
+
module_init(xsegmod_init);
module_exit(xsegmod_exit);
return;
}
+
+/* User-space build of xseg_printtrace(): capture up to 10 return
+ * addresses with backtrace(3), symbolize them with backtrace_symbols(3)
+ * and log each frame through XSEGLOG. */
+void xseg_printtrace(void)
+{
+ void *array[10];
+ size_t size;
+ char **strings;
+ int i;
+
+ size = backtrace (array, 10);
+ strings = backtrace_symbols (array, size);
+
+ /* NOTE(review): 'size' is size_t, so %zu (not %zd, which is for
+  * ssize_t) is the matching conversion specifier. */
+ XSEGLOG("Obtained %zd stack frames.\n", size);
+
+ /* NOTE(review): backtrace_symbols() returns NULL on allocation
+  * failure; strings[i] is dereferenced unchecked. 'i < size' also
+  * compares signed int against size_t. */
+ for (i = 0; i < size; i++)
+ XSEGLOG ("%s\n", strings[i]);
+
+ /* Per backtrace_symbols(3), freeing the outer array releases the
+  * whole allocation; the individual strings must not be freed. */
+ free (strings);
+}
EXPORT_SYMBOL(__xseg_log);
EXPORT_SYMBOL(init_logctx);
EXPORT_SYMBOL(__xseg_log2);
+EXPORT_SYMBOL(xseg_printtrace);
#ifndef _XLOCK_H
#define _XLOCK_H
+#include <sys/util.h>
+
/* Memory/compiler barrier and spin-pause primitives. */
#define MFENCE() __sync_synchronize()
#define BARRIER() __asm__ __volatile__ ("" ::: "memory")
#define __pause() __asm__ __volatile__ ("pause\n");
/* NOTE(review): two __pause() definitions appear here because this chunk
 * drops the #if/#else (x86 vs. other arch, presumably) that selects
 * between them — confirm in the full file. */
#define __pause()
/* Sentinel owner value meaning "lock is free". */
#define Noone ((unsigned long)-1)
+#define MAX_VALID_OWNER 65536 /* we are not gonna have more ports than that */
/* Spin-lock word: Noone when free, otherwise the acquirer's id ('who'),
 * expected to be a port number (see MAX_VALID_OWNER above). */
struct xlock {
- long owner;
+ unsigned long owner;
} __attribute__ ((aligned (16))); /* support up to 128bit longs */
/*
 * Spin until 'lock' is acquired for caller id 'who'; returns 'who'.
 * Free state is owner == Noone; acquisition is a CAS of Noone -> who.
 * The +/- lines are unified-diff residue: the patch replaces the bare
 * busy-wait with an instrumented loop adding corruption detection and
 * periodic spin logging. Markers are kept byte-for-byte.
 */
static inline unsigned long xlock_acquire(struct xlock *lock, unsigned long who)
{
+ unsigned long owner;
+ unsigned long times = 1;
	for (;;) {
/* Inner loop: re-read the owner word until it reads Noone (free). */
- for (; *(volatile unsigned long *)(&lock->owner) != Noone; )
+ for (owner = *(volatile unsigned long *)(&lock->owner); ; owner = *(volatile unsigned long *)(&lock->owner)){
+ if (owner == Noone)
+ break;
/* NOTE(review): this "repair" is a plain unsynchronized store while other
 * CPUs may legitimately hold or contend the lock — racy by inspection. */
+ if (owner > MAX_VALID_OWNER){
+ XSEGLOG("xlock %lx corrupted. Lock owner %lu",
+ (unsigned long) lock, owner);
+ XSEGLOG("Resetting xlock %lx to Noone",
+ (unsigned long) lock);
+ lock->owner = Noone;
+ }
/* Warn every 2^20 spins. NOTE(review): 'times' is unsigned long but is
 * printed with %llu (unsigned long long) — type mismatch; UB per the C
 * standard even where the widths happen to agree (LP64). */
+ if (!(times & ((1<<20) -1))){
+ XSEGLOG("xlock %lx spinned for %llu times"
+ "\n\t who: %lu, owner: %lu",
+ (unsigned long) lock, times,
+ who, owner);
+// xseg_printtrace();
+ }
+ times++;
	__pause();
+ }
/* Try to claim the free lock; if the CAS fails another spinner won the
 * race — go around and spin again. */
	if (__sync_bool_compare_and_swap(&lock->owner, Noone, who))
	break;
	}
/* Post-acquisition sanity check on the owner value just installed. */
+ if (lock->owner > MAX_VALID_OWNER){
+ XSEGLOG("xlock %lx locked with INVALID lock owner %lu",
+ (unsigned long) lock, lock->owner);
+ }
	return who;
}