Refactor code to use single quotes for strings, update the HTML structure for better readability, and improve error handling in various modules. Add a Prettier configuration for consistent code formatting.
This commit is contained in:
@@ -1,109 +0,0 @@
|
||||
;(function(){
|
||||
|
||||
var sT = setTimeout || {}, u;
|
||||
if(typeof window !== ''+u){ sT.window = window }
|
||||
var AXE = (sT.window||'').AXE || function(){};
|
||||
if(AXE.window = sT.window){ AXE.window.AXE = AXE }
|
||||
|
||||
var Gun = (AXE.window||'').GUN || require('./gun');
|
||||
(Gun.AXE = AXE).GUN = AXE.Gun = Gun;
|
||||
|
||||
//if(!Gun.window){ try{ require('./lib/axe') }catch(e){} }
|
||||
if(!Gun.window){ require('./lib/axe') }
|
||||
|
||||
Gun.on('opt', function(at){ start(at) ; this.to.next(at) }); // make sure to call the "next" middleware adapter.
|
||||
|
||||
function start(root){
|
||||
if(root.axe){ return }
|
||||
var opt = root.opt, peers = opt.peers;
|
||||
if(false === opt.axe){ return }
|
||||
if(!Gun.window){ return } // handled by ^ lib/axe.js
|
||||
var w = Gun.window, lS = w.localStorage || opt.localStorage || {}, loc = w.location || opt.location || {}, nav = w.navigator || opt.navigator || {};
|
||||
var axe = root.axe = {}, tmp, id;
|
||||
var mesh = opt.mesh = opt.mesh || Gun.Mesh(root); // DAM!
|
||||
|
||||
tmp = peers[id = loc.origin + '/gun'] = peers[id] || {};
|
||||
tmp.id = tmp.url = id; tmp.retry = tmp.retry || 0;
|
||||
tmp = peers[id = 'http://localhost:8765/gun'] = peers[id] || {};
|
||||
tmp.id = tmp.url = id; tmp.retry = tmp.retry || 0;
|
||||
Gun.log.once("AXE", "AXE enabled: Trying to find network via (1) local peer (2) last used peers (3) a URL parameter, and last (4) hard coded peers.");
|
||||
Gun.log.once("AXEWarn", "Warning: AXE is in alpha, use only for testing!");
|
||||
var last = lS.peers || ''; if(last){ last += ' ' }
|
||||
last += ((loc.search||'').split('peers=')[1]||'').split('&')[0];
|
||||
|
||||
root.on('bye', function(peer){
|
||||
this.to.next(peer);
|
||||
if(!peer.url){ return } // ignore WebRTC disconnects for now.
|
||||
if(!nav.onLine){ peer.retry = 1 }
|
||||
if(peer.retry){ return }
|
||||
if(axe.fall){ delete axe.fall[peer.url || peer.id] }
|
||||
(function next(){
|
||||
if(!axe.fall){ setTimeout(next, 9); return } // not found yet
|
||||
var fall = Object.keys(axe.fall||''), one = fall[(Math.random()*fall.length) >> 0];
|
||||
if(!fall.length){ lS.peers = ''; one = 'https://gunjs.herokuapp.com/gun' } // out of peers
|
||||
if(peers[one]){ next(); return } // already choose
|
||||
mesh.hi(one);
|
||||
}());
|
||||
});
|
||||
|
||||
root.on('hi', function(peer){ // TEMPORARY! Try to connect all peers.
|
||||
this.to.next(peer);
|
||||
if(!peer.url){ return } // ignore WebRTC disconnects for now.
|
||||
return; // DO NOT COMMIT THIS FEATURE YET! KEEP TESTING NETWORK PERFORMANCE FIRST!
|
||||
(function next(){
|
||||
if(!peer.wire){ return }
|
||||
if(!axe.fall){ setTimeout(next, 9); return } // not found yet
|
||||
var one = (next.fall = next.fall || Object.keys(axe.fall||'')).pop();
|
||||
if(!one){ return }
|
||||
setTimeout(next, 99);
|
||||
mesh.say({dam: 'opt', opt: {peers: one}}, peer);
|
||||
}());
|
||||
});
|
||||
|
||||
function found(text){
|
||||
|
||||
axe.fall = {};
|
||||
((text||'').match(/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)/ig)||[]).forEach(function(url){
|
||||
axe.fall[url] = {url: url, id: url, retry: 0}; // RETRY
|
||||
});
|
||||
|
||||
return;
|
||||
|
||||
// TODO: Finish porting below? Maybe not.
|
||||
|
||||
Object.keys(last.peers||'').forEach(function(key){
|
||||
tmp = peers[id = key] = peers[id] || {};
|
||||
tmp.id = tmp.url = id;
|
||||
});
|
||||
tmp = peers[id = 'https://guntest.herokuapp.com/gun'] = peers[id] || {};
|
||||
tmp.id = tmp.url = id;
|
||||
|
||||
var mesh = opt.mesh = opt.mesh || Gun.Mesh(root); // DAM!
|
||||
mesh.way = function(msg){
|
||||
if(root.$ === msg.$ || (msg._||'').via){
|
||||
mesh.say(msg, opt.peers);
|
||||
return;
|
||||
}
|
||||
var at = (msg.$||'')._;
|
||||
if(!at){ mesh.say(msg, opt.peers); return }
|
||||
if(msg.get){
|
||||
if(at.axe){ return } // don't ask for it again!
|
||||
at.axe = {};
|
||||
}
|
||||
mesh.say(msg, opt.peers);
|
||||
}
|
||||
}
|
||||
|
||||
if(last){ found(last); return }
|
||||
try{ fetch(((loc.search||'').split('axe=')[1]||'').split('&')[0] || loc.axe || 'https://raw.githubusercontent.com/wiki/amark/gun/volunteer.dht.md').then(function(res){
|
||||
return res.text()
|
||||
}).then(function(text){
|
||||
found(lS.peers = text);
|
||||
}).catch(function(){
|
||||
found(); // nothing
|
||||
})}catch(e){found()}
|
||||
}
|
||||
|
||||
var empty = {}, yes = true;
|
||||
try{ if(typeof module != ''+u){ module.exports = AXE } }catch(e){}
|
||||
}());
|
||||
assets/static/gun.js (2313 lines changed) — file diff suppressed because it is too large.
@@ -1,7 +0,0 @@
|
||||
var Gun = (typeof window !== "undefined")? window.Gun : require('../gun');
|
||||
Gun.chain.open || require('./open');
|
||||
|
||||
Gun.chain.load = function(cb, opt, at){
|
||||
(opt = opt || {}).off = !0;
|
||||
return this.open(cb, opt, at);
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
// assets/static/open.js - Deprecated. Part of Gun library, not used after migration to PouchDB.
|
||||
console.warn('assets/static/open.js is deprecated and unused.');
|
||||
var Gun = (typeof window !== "undefined")? window.Gun || {} : {};
|
||||
|
||||
Gun.chain.open = function(cb, opt, at, depth){ // this is a recursive function, BEWARE!
|
||||
depth = depth || 1;
|
||||
opt = opt || {}; // init top level options.
|
||||
opt.doc = opt.doc || {};
|
||||
opt.ids = opt.ids || {};
|
||||
opt.any = opt.any || cb;
|
||||
opt.meta = opt.meta || false;
|
||||
opt.eve = opt.eve || {off: function(){ // collect all recursive events to unsubscribe to if needed.
|
||||
Object.keys(opt.eve.s).forEach(function(i,e){ // switch to CPU scheduled setTimeout.each?
|
||||
if(e = opt.eve.s[i]){ e.off() }
|
||||
});
|
||||
opt.eve.s = {};
|
||||
}, s:{}}
|
||||
return this.on(function(data, key, ctx, eve){ // subscribe to 1 deeper of data!
|
||||
clearTimeout(opt.to); // do not trigger callback if bunch of changes...
|
||||
opt.to = setTimeout(function(){ // but schedule the callback to fire soon!
|
||||
if(!opt.any){ return }
|
||||
opt.any.call(opt.at.$, opt.doc, opt.key, opt, opt.eve); // call it.
|
||||
if(opt.off){ // check for unsubscribing.
|
||||
opt.eve.off();
|
||||
opt.any = null;
|
||||
}
|
||||
}, opt.wait || 9);
|
||||
opt.at = opt.at || ctx; // opt.at will always be the first context it finds.
|
||||
opt.key = opt.key || key;
|
||||
opt.eve.s[this._.id] = eve; // collect all the events together.
|
||||
if(true === Gun.valid(data)){ // if primitive value...
|
||||
if(!at){
|
||||
opt.doc = data;
|
||||
} else {
|
||||
at[key] = data;
|
||||
}
|
||||
return;
|
||||
}
|
||||
var tmp = this; // else if a sub-object, CPU schedule loop over properties to do recursion.
|
||||
setTimeout.each(Object.keys(data), function(key, val){
|
||||
if('_' === key && !opt.meta){ return }
|
||||
val = data[key];
|
||||
var doc = at || opt.doc, id; // first pass this becomes the root of open, then at is passed below, and will be the parent for each sub-document/object.
|
||||
if(!doc){ return } // if no "parent"
|
||||
if('string' !== typeof (id = Gun.valid(val))){ // if primitive...
|
||||
doc[key] = val;
|
||||
return;
|
||||
}
|
||||
if(opt.ids[id]){ // if we've already seen this sub-object/document
|
||||
doc[key] = opt.ids[id]; // link to itself, our already in-memory one, not a new copy.
|
||||
return;
|
||||
}
|
||||
if(opt.depth <= depth){ // stop recursive open at max depth.
|
||||
doc[key] = doc[key] || val; // show link so app can load it if need.
|
||||
return;
|
||||
} // now open up the recursion of sub-documents!
|
||||
tmp.get(key).open(opt.any, opt, opt.ids[id] = doc[key] = {}, depth+1); // 3rd param is now where we are "at".
|
||||
});
|
||||
})
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
var Gun = (typeof window !== "undefined")? window.Gun : require('../gun');
|
||||
|
||||
Gun.chain.path = function(field, opt){
|
||||
var back = this, gun = back, tmp;
|
||||
if(typeof field === 'string'){
|
||||
tmp = field.split(opt || '.');
|
||||
if(1 === tmp.length){
|
||||
gun = back.get(field);
|
||||
return gun;
|
||||
}
|
||||
field = tmp;
|
||||
}
|
||||
if(field instanceof Array){
|
||||
if(field.length > 1){
|
||||
gun = back;
|
||||
var i = 0, l = field.length;
|
||||
for(i; i < l; i++){
|
||||
//gun = gun.get(field[i], (i+1 === l)? cb : null, opt);
|
||||
gun = gun.get(field[i]);
|
||||
}
|
||||
} else {
|
||||
gun = back.get(field[0]);
|
||||
}
|
||||
return gun;
|
||||
}
|
||||
if(!field && 0 != field){
|
||||
return back;
|
||||
}
|
||||
gun = back.get(''+field);
|
||||
return gun;
|
||||
}
|
||||
@@ -1,606 +0,0 @@
|
||||
;(function(){
|
||||
|
||||
function Radisk(opt){
|
||||
|
||||
opt = opt || {};
|
||||
opt.log = opt.log || console.log;
|
||||
opt.file = String(opt.file || 'radata');
|
||||
var has = (Radisk.has || (Radisk.has = {}))[opt.file];
|
||||
if(has){ return has }
|
||||
|
||||
opt.max = opt.max || (opt.memory? (opt.memory * 999 * 999) : 300000000) * 0.3;
|
||||
opt.until = opt.until || opt.wait || 250;
|
||||
opt.batch = opt.batch || (10 * 1000);
|
||||
opt.chunk = opt.chunk || (1024 * 1024 * 1); // 1MB
|
||||
opt.code = opt.code || {};
|
||||
opt.code.from = opt.code.from || '!';
|
||||
opt.jsonify = true;
|
||||
|
||||
|
||||
function ename(t){ return encodeURIComponent(t).replace(/\*/g, '%2A') } // TODO: Hash this also, but allow migration!
|
||||
function atomic(v){ return u !== v && (!v || 'object' != typeof v) }
|
||||
var timediate = (''+u === typeof setImmediate)? setTimeout : setImmediate;
|
||||
var puff = setTimeout.turn || timediate, u;
|
||||
var map = Radix.object;
|
||||
var ST = 0;
|
||||
|
||||
if(!opt.store){
|
||||
return opt.log("ERROR: Radisk needs `opt.store` interface with `{get: fn, put: fn (, list: fn)}`!");
|
||||
}
|
||||
if(!opt.store.put){
|
||||
return opt.log("ERROR: Radisk needs `store.put` interface with `(file, data, cb)`!");
|
||||
}
|
||||
if(!opt.store.get){
|
||||
return opt.log("ERROR: Radisk needs `store.get` interface with `(file, cb)`!");
|
||||
}
|
||||
if(!opt.store.list){
|
||||
//opt.log("WARNING: `store.list` interface might be needed!");
|
||||
}
|
||||
|
||||
if(''+u != typeof require){ require('./yson') }
|
||||
var parse = JSON.parseAsync || function(t,cb,r){ var u; try{ cb(u, JSON.parse(t,r)) }catch(e){ cb(e) } }
|
||||
var json = JSON.stringifyAsync || function(v,cb,r,s){ var u; try{ cb(u, JSON.stringify(v,r,s)) }catch(e){ cb(e) } }
|
||||
/*
|
||||
Any and all storage adapters should...
|
||||
1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption.
|
||||
2. If a batch exceeds a certain number of writes, we should immediately write to disk when physically possible. This caps total performance, but reduces potential loss.
|
||||
*/
|
||||
var r = function(key, data, cb, tag, DBG){
|
||||
if('function' === typeof data){
|
||||
var o = cb || {};
|
||||
cb = data;
|
||||
r.read(key, cb, o, DBG || tag);
|
||||
return;
|
||||
}
|
||||
//var tmp = (tmp = r.batch = r.batch || {})[key] = tmp[key] || {};
|
||||
//var tmp = (tmp = r.batch = r.batch || {})[key] = data;
|
||||
r.save(key, data, cb, tag, DBG);
|
||||
}
|
||||
r.save = function(key, data, cb, tag, DBG){
|
||||
var s = {key: key}, tags, f, d, q;
|
||||
s.find = function(file){ var tmp;
|
||||
s.file = file || (file = opt.code.from);
|
||||
DBG && (DBG = DBG[file] = DBG[file] || {});
|
||||
DBG && (DBG.sf = DBG.sf || +new Date);
|
||||
//console.only.i && console.log('found', file);
|
||||
if(tmp = r.disk[file]){ s.mix(u, tmp); return }
|
||||
r.parse(file, s.mix, u, DBG);
|
||||
}
|
||||
s.mix = function(err, disk){
|
||||
DBG && (DBG.sml = +new Date);
|
||||
DBG && (DBG.sm = DBG.sm || +new Date);
|
||||
if(s.err = err || s.err){ cb(err); return } // TODO: HANDLE BATCH EMIT
|
||||
var file = s.file = (disk||'').file || s.file, tmp;
|
||||
if(!disk && file !== opt.code.from){ // corrupt file?
|
||||
r.find.bad(file); // remove from dir list
|
||||
r.save(key, data, cb, tag); // try again
|
||||
return;
|
||||
}
|
||||
(disk = r.disk[file] || (r.disk[file] = disk || Radix())).file || (disk.file = file);
|
||||
if(opt.compare){
|
||||
data = opt.compare(disk(key), data, key, file);
|
||||
if(u === data){ cb(err, -1); return } // TODO: HANDLE BATCH EMIT
|
||||
}
|
||||
(s.disk = disk)(key, data);
|
||||
if(tag){
|
||||
(tmp = (tmp = disk.tags || (disk.tags = {}))[tag] || (tmp[tag] = r.tags[tag] || (r.tags[tag] = {})))[file] || (tmp[file] = r.one[tag] || (r.one[tag] = cb));
|
||||
cb = null;
|
||||
}
|
||||
DBG && (DBG.st = DBG.st || +new Date);
|
||||
//console.only.i && console.log('mix', disk.Q);
|
||||
if(disk.Q){ cb && disk.Q.push(cb); return } disk.Q = (cb? [cb] : []);
|
||||
disk.to = setTimeout(s.write, opt.until);
|
||||
}
|
||||
s.write = function(){
|
||||
DBG && (DBG.sto = DBG.sto || +new Date);
|
||||
var file = f = s.file, disk = d = s.disk;
|
||||
q = s.q = disk.Q;
|
||||
tags = s.tags = disk.tags;
|
||||
delete disk.Q;
|
||||
delete r.disk[file];
|
||||
delete disk.tags;
|
||||
//console.only.i && console.log('write', file, disk, 'was saving:', key, data);
|
||||
r.write(file, disk, s.ack, u, DBG);
|
||||
}
|
||||
s.ack = function(err, ok){
|
||||
DBG && (DBG.sa = DBG.sa || +new Date);
|
||||
DBG && (DBG.sal = q.length);
|
||||
var ack, tmp;
|
||||
// TODO!!!! CHANGE THIS INTO PUFF!!!!!!!!!!!!!!!!
|
||||
for(var id in r.tags){
|
||||
if(!r.tags.hasOwnProperty(id)){ continue } var tag = r.tags[id];
|
||||
if((tmp = r.disk[f]) && (tmp = tmp.tags) && tmp[tag]){ continue }
|
||||
ack = tag[f];
|
||||
delete tag[f];
|
||||
var ne; for(var k in tag){ if(tag.hasOwnProperty(k)){ ne = true; break } } // is not empty?
|
||||
if(ne){ continue } //if(!obj_empty(tag)){ continue }
|
||||
delete r.tags[tag];
|
||||
ack && ack(err, ok);
|
||||
}
|
||||
!q && (q = '');
|
||||
var l = q.length, i = 0;
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
|
||||
var S = +new Date;
|
||||
for(;i < l; i++){ (ack = q[i]) && ack(err, ok) }
|
||||
console.STAT && console.STAT(S, +new Date - S, 'rad acks', ename(s.file));
|
||||
console.STAT && console.STAT(S, q.length, 'rad acks #', ename(s.file));
|
||||
}
|
||||
cb || (cb = function(err, ok){ // test delete!
|
||||
if(!err){ return }
|
||||
});
|
||||
//console.only.i && console.log('save', key);
|
||||
r.find(key, s.find);
|
||||
}
|
||||
r.disk = {};
|
||||
r.one = {};
|
||||
r.tags = {};
|
||||
|
||||
/*
|
||||
Any storage engine at some point will have to do a read in order to write.
|
||||
This is true of even systems that use an append only log, if they support updates.
|
||||
Therefore it is unavoidable that a read will have to happen,
|
||||
the question is just how long you delay it.
|
||||
*/
|
||||
var RWC = 0;
|
||||
r.write = function(file, rad, cb, o, DBG){
|
||||
if(!rad){ cb('No radix!'); return }
|
||||
o = ('object' == typeof o)? o : {force: o};
|
||||
var f = function Fractal(){}, a, b;
|
||||
f.text = '';
|
||||
f.file = file = rad.file || (rad.file = file);
|
||||
if(!file){ cb('What file?'); return }
|
||||
f.write = function(){
|
||||
var text = rad.raw = f.text;
|
||||
r.disk[file = rad.file || f.file || file] = rad;
|
||||
var S = +new Date;
|
||||
DBG && (DBG.wd = S);
|
||||
//console.only.i && console.log('add', file);
|
||||
r.find.add(file, function add(err){
|
||||
DBG && (DBG.wa = +new Date);
|
||||
if(err){ cb(err); return }
|
||||
//console.only.i && console.log('disk', file, text);
|
||||
opt.store.put(ename(file), text, function safe(err, ok){
|
||||
DBG && (DBG.wp = +new Date);
|
||||
console.STAT && console.STAT(S, ST = +new Date - S, "wrote disk", JSON.stringify(file), ++RWC, 'total all writes.');
|
||||
//console.only.i && console.log('done', err, ok || 1, cb);
|
||||
cb(err, ok || 1);
|
||||
if(!rad.Q){ delete r.disk[file] } // VERY IMPORTANT! Clean up memory, but not if there is already queued writes on it!
|
||||
});
|
||||
});
|
||||
}
|
||||
f.split = function(){
|
||||
var S = +new Date;
|
||||
DBG && (DBG.wf = S);
|
||||
f.text = '';
|
||||
if(!f.count){ f.count = 0;
|
||||
Radix.map(rad, function count(){ f.count++ }); // TODO: Perf? Any faster way to get total length?
|
||||
}
|
||||
DBG && (DBG.wfc = f.count);
|
||||
f.limit = Math.ceil(f.count/2);
|
||||
var SC = f.count;
|
||||
f.count = 0;
|
||||
DBG && (DBG.wf1 = +new Date);
|
||||
f.sub = Radix();
|
||||
Radix.map(rad, f.slice, {reverse: 1}); // IMPORTANT: DO THIS IN REVERSE, SO LAST HALF OF DATA MOVED TO NEW FILE BEFORE DROPPING FROM CURRENT FILE.
|
||||
DBG && (DBG.wf2 = +new Date);
|
||||
r.write(f.end, f.sub, f.both, o);
|
||||
DBG && (DBG.wf3 = +new Date);
|
||||
f.hub = Radix();
|
||||
Radix.map(rad, f.stop);
|
||||
DBG && (DBG.wf4 = +new Date);
|
||||
r.write(rad.file, f.hub, f.both, o);
|
||||
DBG && (DBG.wf5 = +new Date);
|
||||
console.STAT && console.STAT(S, +new Date - S, "rad split", ename(rad.file), SC);
|
||||
return true;
|
||||
}
|
||||
f.slice = function(val, key){
|
||||
f.sub(f.end = key, val);
|
||||
if(f.limit <= (++f.count)){ return true }
|
||||
}
|
||||
f.stop = function(val, key){
|
||||
if(key >= f.end){ return true }
|
||||
f.hub(key, val);
|
||||
}
|
||||
f.both = function(err, ok){
|
||||
DBG && (DBG.wfd = +new Date);
|
||||
if(b){ cb(err || b); return }
|
||||
if(a){ cb(err, ok); return }
|
||||
a = true;
|
||||
b = err;
|
||||
}
|
||||
f.each = function(val, key, k, pre){
|
||||
if(u !== val){ f.count++ }
|
||||
if(opt.max <= (val||'').length){ return cb("Data too big!"), true }
|
||||
var enc = Radisk.encode(pre.length) +'#'+ Radisk.encode(k) + (u === val? '' : ':'+ Radisk.encode(val)) +'\n';
|
||||
if((opt.chunk < f.text.length + enc.length) && (1 < f.count) && !o.force){
|
||||
return f.split();
|
||||
}
|
||||
f.text += enc;
|
||||
}
|
||||
//console.only.i && console.log('writing');
|
||||
if(opt.jsonify){ r.write.jsonify(f, rad, cb, o, DBG); return } // temporary testing idea
|
||||
if(!Radix.map(rad, f.each, true)){ f.write() }
|
||||
}
|
||||
|
||||
r.write.jsonify = function(f, rad, cb, o, DBG){
|
||||
var raw;
|
||||
var S = +new Date;
|
||||
DBG && (DBG.w = S);
|
||||
try{raw = JSON.stringify(rad.$);
|
||||
}catch(e){ cb("Cannot radisk!"); return }
|
||||
DBG && (DBG.ws = +new Date);
|
||||
console.STAT && console.STAT(S, +new Date - S, "rad stringified JSON");
|
||||
if(opt.chunk < raw.length && !o.force){
|
||||
var c = 0;
|
||||
Radix.map(rad, function(){
|
||||
if(c++){ return true } // more than 1 item
|
||||
});
|
||||
if(c > 1){
|
||||
return f.split();
|
||||
}
|
||||
}
|
||||
f.text = raw;
|
||||
f.write();
|
||||
}
|
||||
|
||||
r.range = function(tree, o){
|
||||
if(!tree || !o){ return }
|
||||
if(u === o.start && u === o.end){ return tree }
|
||||
if(atomic(tree)){ return tree }
|
||||
var sub = Radix();
|
||||
Radix.map(tree, function(v,k){ sub(k,v) }, o); // ONLY PLACE THAT TAKES TREE, maybe reduce API for better perf?
|
||||
return sub('');
|
||||
}
|
||||
|
||||
;(function(){
|
||||
r.read = function(key, cb, o, DBG){
|
||||
o = o || {};
|
||||
var g = {key: key};
|
||||
g.find = function(file){ var tmp;
|
||||
g.file = file || (file = opt.code.from);
|
||||
DBG && (DBG = DBG[file] = DBG[file] || {});
|
||||
DBG && (DBG.rf = DBG.rf || +new Date);
|
||||
if(tmp = r.disk[g.file = file]){ g.check(u, tmp); return }
|
||||
r.parse(file, g.check, u, DBG);
|
||||
}
|
||||
g.get = function(err, disk, info){
|
||||
DBG && (DBG.rgl = +new Date);
|
||||
DBG && (DBG.rg = DBG.rg || +new Date);
|
||||
if(g.err = err || g.err){ cb(err); return }
|
||||
var file = g.file = (disk||'').file || g.file;
|
||||
if(!disk && file !== opt.code.from){ // corrupt file?
|
||||
r.find.bad(file); // remove from dir list
|
||||
r.read(key, cb, o); // try again
|
||||
return;
|
||||
}
|
||||
disk = r.disk[file] || (r.disk[file] = disk);
|
||||
if(!disk){ cb(file === opt.code.from? u : "No file!"); return }
|
||||
disk.file || (disk.file = file);
|
||||
var data = r.range(disk(key), o);
|
||||
DBG && (DBG.rr = +new Date);
|
||||
o.unit = disk.unit;
|
||||
o.chunks = (o.chunks || 0) + 1;
|
||||
o.parsed = (o.parsed || 0) + ((info||'').parsed||(o.chunks*opt.chunk));
|
||||
o.more = 1;
|
||||
o.next = u;
|
||||
Radix.map(r.list, function next(v,f){
|
||||
if(!v || file === f){ return }
|
||||
o.next = f;
|
||||
return 1;
|
||||
}, o.reverse? {reverse: 1, end: file} : {start: file});
|
||||
DBG && (DBG.rl = +new Date);
|
||||
if(!o.next){ o.more = 0 }
|
||||
if(o.next){
|
||||
if(!o.reverse && ((key < o.next && 0 != o.next.indexOf(key)) || (u !== o.end && (o.end || '\uffff') < o.next))){ o.more = 0 }
|
||||
if(o.reverse && ((key > o.next && 0 != key.indexOf(o.next)) || ((u !== o.start && (o.start || '') > o.next && file <= o.start)))){ o.more = 0 }
|
||||
}
|
||||
//console.log(5, process.memoryUsage().heapUsed);
|
||||
if(!o.more){ cb(g.err, data, o); return }
|
||||
if(data){ cb(g.err, data, o) }
|
||||
if(o.parsed >= o.limit){ return }
|
||||
var S = +new Date;
|
||||
DBG && (DBG.rm = S);
|
||||
var next = o.next;
|
||||
timediate(function(){
|
||||
console.STAT && console.STAT(S, +new Date - S, 'rad more');
|
||||
r.parse(next, g.check);
|
||||
},0);
|
||||
}
|
||||
g.check = function(err, disk, info){
|
||||
//console.log(4, process.memoryUsage().heapUsed);
|
||||
g.get(err, disk, info);
|
||||
if(!disk || disk.check){ return } disk.check = 1;
|
||||
var S = +new Date;
|
||||
(info || (info = {})).file || (info.file = g.file);
|
||||
Radix.map(disk, function(val, key){
|
||||
// assume in memory for now, since both write/read already call r.find which will init it.
|
||||
r.find(key, function(file){
|
||||
if((file || (file = opt.code.from)) === info.file){ return }
|
||||
var id = (''+Math.random()).slice(-3);
|
||||
puff(function(){
|
||||
r.save(key, val, function ack(err, ok){
|
||||
if(err){ r.save(key, val, ack); return } // ad infinitum???
|
||||
// TODO: NOTE!!! Mislocated data could be because of a synchronous `put` from the `g.get(` other than perf shouldn't we do the check first before acking?
|
||||
console.STAT && console.STAT("MISLOCATED DATA CORRECTED", id, ename(key), ename(info.file), ename(file));
|
||||
});
|
||||
},0);
|
||||
})
|
||||
});
|
||||
console.STAT && console.STAT(S, +new Date - S, "rad check");
|
||||
}
|
||||
r.find(key || (o.reverse? (o.end||'') : (o.start||'')), g.find);
|
||||
}
|
||||
function rev(a,b){ return b }
|
||||
var revo = {reverse: true};
|
||||
}());
|
||||
|
||||
;(function(){
|
||||
/*
|
||||
Let us start by assuming we are the only process that is
|
||||
changing the directory or bucket. Not because we do not want
|
||||
to be multi-process/machine, but because we want to experiment
|
||||
with how much performance and scale we can get out of only one.
|
||||
Then we can work on the harder problem of being multi-process.
|
||||
*/
|
||||
var RPC = 0;
|
||||
var Q = {}, s = String.fromCharCode(31);
|
||||
r.parse = function(file, cb, raw, DBG){ var q;
|
||||
if(!file){ return cb(); }
|
||||
if(q = Q[file]){ q.push(cb); return } q = Q[file] = [cb];
|
||||
var p = function Parse(){}, info = {file: file};
|
||||
(p.disk = Radix()).file = file;
|
||||
p.read = function(err, data){ var tmp;
|
||||
DBG && (DBG.rpg = +new Date);
|
||||
console.STAT && console.STAT(S, +new Date - S, 'read disk', JSON.stringify(file), ++RPC, 'total all parses.');
|
||||
//console.log(2, process.memoryUsage().heapUsed);
|
||||
if((p.err = err) || (p.not = !data)){
|
||||
delete Q[file];
|
||||
p.map(q, p.ack);
|
||||
return;
|
||||
}
|
||||
if('string' !== typeof data){
|
||||
try{
|
||||
if(opt.max <= data.length){
|
||||
p.err = "Chunk too big!";
|
||||
} else {
|
||||
data = data.toString(); // If it crashes, it crashes here. How!?? We check size first!
|
||||
}
|
||||
}catch(e){ p.err = e }
|
||||
if(p.err){
|
||||
delete Q[file];
|
||||
p.map(q, p.ack);
|
||||
return;
|
||||
}
|
||||
}
|
||||
info.parsed = data.length;
|
||||
DBG && (DBG.rpl = info.parsed);
|
||||
DBG && (DBG.rpa = q.length);
|
||||
S = +new Date;
|
||||
if(!(opt.jsonify || '{' === data[0])){
|
||||
p.radec(err, data);
|
||||
return;
|
||||
}
|
||||
parse(data, function(err, tree){
|
||||
//console.log(3, process.memoryUsage().heapUsed);
|
||||
if(!err){
|
||||
delete Q[file];
|
||||
p.disk.$ = tree;
|
||||
console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'rad parsed JSON');
|
||||
DBG && (DBG.rpd = +new Date);
|
||||
p.map(q, p.ack); // hmmm, v8 profiler can't see into this cause of try/catch?
|
||||
return;
|
||||
}
|
||||
if('{' === data[0]){
|
||||
delete Q[file];
|
||||
p.err = tmp || "JSON error!";
|
||||
p.map(q, p.ack);
|
||||
return;
|
||||
}
|
||||
p.radec(err, data);
|
||||
});
|
||||
}
|
||||
p.map = function(){ // switch to setTimeout.each now?
|
||||
if(!q || !q.length){ return }
|
||||
//var i = 0, l = q.length, ack;
|
||||
var S = +new Date;
|
||||
var err = p.err, data = p.not? u : p.disk;
|
||||
var i = 0, ack; while(i < 9 && (ack = q[i++])){ ack(err, data, info) } // too much?
|
||||
console.STAT && console.STAT(S, +new Date - S, 'rad packs', ename(file));
|
||||
console.STAT && console.STAT(S, i, 'rad packs #', ename(file));
|
||||
if(!(q = q.slice(i)).length){ return }
|
||||
puff(p.map, 0);
|
||||
}
|
||||
p.ack = function(cb){
|
||||
if(!cb){ return }
|
||||
if(p.err || p.not){
|
||||
cb(p.err, u, info);
|
||||
return;
|
||||
}
|
||||
cb(u, p.disk, info);
|
||||
}
|
||||
p.radec = function(err, data){
|
||||
delete Q[file];
|
||||
S = +new Date;
|
||||
var tmp = p.split(data), pre = [], i, k, v;
|
||||
if(!tmp || 0 !== tmp[1]){
|
||||
p.err = "File '"+file+"' does not have root radix! ";
|
||||
p.map(q, p.ack);
|
||||
return;
|
||||
}
|
||||
while(tmp){
|
||||
k = v = u;
|
||||
i = tmp[1];
|
||||
tmp = p.split(tmp[2])||'';
|
||||
if('#' == tmp[0]){
|
||||
k = tmp[1];
|
||||
pre = pre.slice(0,i);
|
||||
if(i <= pre.length){
|
||||
pre.push(k);
|
||||
}
|
||||
}
|
||||
tmp = p.split(tmp[2])||'';
|
||||
if('\n' == tmp[0]){ continue }
|
||||
if('=' == tmp[0] || ':' == tmp[0]){ v = tmp[1] }
|
||||
if(u !== k && u !== v){ p.disk(pre.join(''), v) }
|
||||
tmp = p.split(tmp[2]);
|
||||
}
|
||||
console.STAT && console.STAT(S, +new Date - S, 'parsed RAD');
|
||||
p.map(q, p.ack);
|
||||
};
|
||||
p.split = function(t){
|
||||
if(!t){ return }
|
||||
var l = [], o = {}, i = -1, a = '', b, c;
|
||||
i = t.indexOf(s);
|
||||
if(!t[i]){ return }
|
||||
a = t.slice(0, i);
|
||||
l[0] = a;
|
||||
l[1] = b = Radisk.decode(t.slice(i), o);
|
||||
l[2] = t.slice(i + o.i);
|
||||
return l;
|
||||
}
|
||||
if(r.disk){ raw || (raw = (r.disk[file]||'').raw) }
|
||||
var S = +new Date, SM, SL;
|
||||
DBG && (DBG.rp = S);
|
||||
if(raw){ return puff(function(){ p.read(u, raw) }, 0) }
|
||||
opt.store.get(ename(file), p.read);
|
||||
// TODO: What if memory disk gets filled with updates, and we get an old one back?
|
||||
}
|
||||
}());
|
||||
|
||||
;(function(){
|
||||
var dir, f = String.fromCharCode(28), Q;
|
||||
r.find = function(key, cb){
|
||||
if(!dir){
|
||||
if(Q){ Q.push([key, cb]); return } Q = [[key, cb]];
|
||||
r.parse(f, init);
|
||||
return;
|
||||
}
|
||||
Radix.map(r.list = dir, function(val, key){
|
||||
if(!val){ return }
|
||||
return cb(key) || true;
|
||||
}, {reverse: 1, end: key}) || cb(opt.code.from);
|
||||
}
|
||||
r.find.add = function(file, cb){
|
||||
var has = dir(file);
|
||||
if(has || file === f){ cb(u, 1); return }
|
||||
dir(file, 1);
|
||||
cb.found = (cb.found || 0) + 1;
|
||||
r.write(f, dir, function(err, ok){
|
||||
if(err){ cb(err); return }
|
||||
cb.found = (cb.found || 0) - 1;
|
||||
if(0 !== cb.found){ return }
|
||||
cb(u, 1);
|
||||
}, true);
|
||||
}
|
||||
r.find.bad = function(file, cb){
|
||||
dir(file, 0);
|
||||
r.write(f, dir, cb||noop);
|
||||
}
|
||||
function init(err, disk){
|
||||
if(err){
|
||||
opt.log('list', err);
|
||||
setTimeout(function(){ r.parse(f, init) }, 1000);
|
||||
return;
|
||||
}
|
||||
if(disk){ drain(disk); return }
|
||||
dir = dir || disk || Radix();
|
||||
if(!opt.store.list){ drain(dir); return }
|
||||
// import directory.
|
||||
opt.store.list(function(file){
|
||||
if(!file){ drain(dir); return }
|
||||
r.find.add(file, noop);
|
||||
});
|
||||
}
|
||||
function drain(rad, tmp){
|
||||
dir = dir || rad;
|
||||
dir.file = f;
|
||||
tmp = Q; Q = null;
|
||||
map(tmp, function(arg){
|
||||
r.find(arg[0], arg[1]);
|
||||
});
|
||||
}
|
||||
}());
|
||||
|
||||
try{ !Gun.window && require('./radmigtmp')(r) }catch(e){}
|
||||
|
||||
var noop = function(){}, RAD, u;
|
||||
Radisk.has[opt.file] = r;
|
||||
return r;
|
||||
}
|
||||
|
||||
;(function(){
	// RAD token framing: every field is fenced by the ASCII Unit Separator
	// (char 31). A value encodes as <fences><type-tag><payload><fence>,
	// where extra leading fences escape separators inside string payloads.
	var _ = String.fromCharCode(31), u;

	// Encode a primitive (string, number, boolean, null) or a GUN link
	// ({'#': soul}) into its RAD wire form. `s` optionally overrides the
	// fence character; `o` is unused (kept for symmetry with decode).
	// Returns undefined for unsupported types.
	Radisk.encode = function(d, o, s){ s = s || _;
		var t = s, tmp;
		if(typeof d == 'string'){
			var i = d.indexOf(s);
			// Escape: add one extra leading fence per separator in the payload.
			while(i != -1){ t += s; i = d.indexOf(s, i+1) }
			return t + '"' + d + s;
		} else
		// FIX: `tmp` was declared but never assigned, so links serialized as
		// "#undefined". Capture the soul (d['#']) into tmp before using it,
		// matching what Radisk.decode reconstructs as {'#': soul}.
		if(d && (tmp = d['#']) && 1 == Object.keys(d).length){
			return t + '#' + tmp + t;
		} else
		if('number' == typeof d){
			return t + '+' + (d||0) + t; // NaN coerces to 0 here, by design of `||`.
		} else
		if(null === d){
			return t + ' ' + t;
		} else
		if(true === d){
			return t + '+' + t; // bare '+' (empty payload) means boolean true.
		} else
		if(false === d){
			return t + '-' + t;
		}// else
		//if(binary){}
	}

	// Decode one RAD token from the start of `t`. Mirrors Radisk.encode:
	// '"' string, '#' link, '+' number/true, ' ' null, '-' false.
	// If `o` is given, o.i is set just past the token so callers can keep scanning.
	Radisk.decode = function(t, o, s){ s = s || _;
		var d = '', i = -1, n = 0, c, p;
		if(s !== t[0]){ return }
		while(s === t[++i]){ ++n } // count leading fences (escape depth).
		p = t[c = n] || true;      // the type tag follows the fences.
		while(--n >= 0){ i = t.indexOf(s, i+1) } // skip escaped separators.
		if(i == -1){ i = t.length }
		d = t.slice(c+1, i);
		if(o){ o.i = i+1 }
		if('"' === p){
			return d;
		} else
		if('#' === p){
			return {'#':d};
		} else
		if('+' === p){
			if(0 === d.length){
				return true; // empty '+' payload is boolean true.
			}
			return parseFloat(d);
		} else
		if(' ' === p){
			return null;
		} else
		if('-' === p){
			return false;
		}
	}
}());
|
||||
|
||||
if(typeof window !== "undefined"){
|
||||
var Gun = window.Gun;
|
||||
var Radix = window.Radix;
|
||||
window.Radisk = Radisk;
|
||||
} else {
|
||||
var Gun = require('../gun');
|
||||
var Radix = require('./radix');
|
||||
//var Radix = require('./radix2'); Radisk = require('./radisk2');
|
||||
try{ module.exports = Radisk }catch(e){}
|
||||
}
|
||||
|
||||
Radisk.Radix = Radix;
|
||||
|
||||
}());
|
||||
@@ -1,124 +0,0 @@
|
||||
;(function(){

	// Radix: an in-memory radix (prefix) tree, used by RAD as its RAM index.
	// Radix() returns a closure `radix(key, val, t)` that both reads and writes:
	//   radix(key)      -> read: stored value, or an object of matching suffixes, or undefined.
	//   radix(key, val) -> write: store `val` at `key`, splitting shared prefixes as needed.
	// A node's own value lives under its '' (empty string) property; the `_`
	// sentinel property caches a sorted key list and is invalidated on writes.
	function Radix(){
		var radix = function(key, val, t){
			radix.unit = 0; // reset per call; set to 1 when an exact leaf hit also has children.
			if(!t && u !== val){
				// Top-level write: track the lexically largest key ever written,
				// and invalidate the root's cached sort order.
				radix.last = (''+key < radix.last)? radix.last : ''+key;
				delete (radix.$||{})[_];
			}
			t = t || radix.$ || (radix.$ = {}); // current subtree (root on first call).
			if(!key && Object.keys(t).length){ return t } // empty key: return whole subtree.
			key = ''+key;
			var i = 0, l = key.length-1, k = key[i], at, tmp;
			// Grow the candidate edge label one char at a time until it matches an edge.
			while(!(at = t[k]) && i < l){
				k += key[++i];
			}
			if(!at){ // no whole-edge match; look for an edge sharing a partial prefix.
				if(!each(t, function(r, s){
					var ii = 0, kk = '';
					// Longest common prefix of existing edge `s` and the key.
					if((s||'').length){ while(s[ii] == key[ii]){
						kk += s[ii++];
					} }
					if(kk){
						if(u === val){ // read: collect suffixes under the shared prefix.
							if(ii <= l){ return }
							(tmp || (tmp = {}))[s.slice(ii)] = r;
							//(tmp[_] = function $(){ $.sort = Object.keys(tmp).sort(); return $ }()); // get rid of this one, cause it is on read?
							return r;
						}
						// Write: split the edge — new node `kk` holds both the old
						// subtree (under its leftover suffix) and the new value.
						var __ = {};
						__[s.slice(ii)] = r;
						ii = key.slice(ii);
						('' === ii)? (__[''] = val) : ((__[ii] = {})[''] = val);
						//(__[_] = function $(){ $.sort = Object.keys(__).sort(); return $ }());
						t[kk] = __;
						if(Radix.debug && 'undefined' === ''+kk){ console.log(0, kk); debugger }
						delete t[s];
						//(t[_] = function $(){ $.sort = Object.keys(t).sort(); return $ }());
						return true;
					}
				})){
					// No shared prefix anywhere: start a brand-new edge.
					if(u === val){ return; }
					(t[k] || (t[k] = {}))[''] = val;
					if(Radix.debug && 'undefined' === ''+k){ console.log(1, k); debugger }
					//(t[_] = function $(){ $.sort = Object.keys(t).sort(); return $ }());
				}
				if(u === val){
					return tmp; // read result: collected suffix map (or undefined).
				}
			} else
			if(i == l){ // exact edge match consuming the whole key.
				//if(u === val){ return (u === (tmp = at['']))? at : tmp } // THIS CODE IS CORRECT, below is
				if(u === val){ return (u === (tmp = at['']))? at : ((radix.unit = 1) && tmp) } // temporary help??
				at[''] = val;
				//(at[_] = function $(){ $.sort = Object.keys(at).sort(); return $ }());
			} else {
				// Partial match: recurse into the subtree with the remaining suffix.
				if(u !== val){ delete at[_] } // writes below invalidate this node's sort cache.
				//at && (at[_] = function $(){ $.sort = Object.keys(at).sort(); return $ }());
				return radix(key.slice(++i), val, at || (at = {}));
			}
		}
		return radix;
	};

	// Depth-first, lexicographically ordered traversal of a radix tree.
	// cb(value, fullKey, edgeKey, prefixParts) is called per stored value; a
	// non-undefined return short-circuits and is returned to the caller.
	// opt: {start, end, reverse, branch} — `branch: true` also visits interior
	// nodes (with value undefined); `true` is shorthand for {branch: true}.
	Radix.map = function rap(radix, cb, opt, pre){
		try {
			pre = pre || []; // TODO: BUG: most out-of-memory crashes come from here.
			var t = ('function' == typeof radix)? radix.$ || {} : radix;
			if(!t){ return }
			if('string' == typeof t){ if(Radix.debug){ throw ['BUG:', radix, cb, opt, pre] } return; }
			// Reuse the node's cached sorted key list, building it on demand. // ONLY 17% of ops are pre-sorted!
			var keys = (t[_]||no).sort || (t[_] = function $(){ $.sort = Object.keys(t).sort(); return $ }()).sort, rev;
			opt = (true === opt)? {branch: true} : (opt || {});
			if(rev = opt.reverse){ keys = keys.slice(0).reverse() }
			var start = opt.start, end = opt.end, END = '\uffff';
			var i = 0, l = keys.length;
			for(;i < l; i++){ var key = keys[i], tree = t[key], tmp, p, pt;
				if(!tree || '' === key || _ === key || 'undefined' === key){ continue } // skip leaf slot, sort cache, junk.
				p = pre.slice(0); p.push(key);
				pt = p.join(''); // full key accumulated down this branch.
				// Prune whole branches outside the [start, end] range.
				if(u !== start && pt < (start||'').slice(0,pt.length)){ continue }
				if(u !== end && (end || END) < pt){ continue }
				if(rev){ // children must be checked first when going in reverse.
					tmp = rap(tree, cb, opt, p);
					if(u !== tmp){ return tmp }
				}
				if(u !== (tmp = tree[''])){ // node holds a value: maybe report it.
					var yes = 1;
					if(u !== start && pt < (start||'')){ yes = 0 }
					if(u !== end && pt > (end || END)){ yes = 0 }
					if(yes){
						tmp = cb(tmp, pt, key, pre);
						if(u !== tmp){ return tmp }
					}
				} else
				if(opt.branch){ // interior node, reported only when asked.
					tmp = cb(u, pt, key, pre);
					if(u !== tmp){ return tmp }
				}
				pre = p;
				if(!rev){ // forward order: values first, then children.
					tmp = rap(tree, cb, opt, pre);
					if(u !== tmp){ return tmp }
				}
				pre.pop();
			}
		} catch (e) { console.error(e); }
	};

	// Export for browser (global) or CommonJS; the try{} guards ESM, where
	// `module` does not exist.
	if(typeof window !== "undefined"){
		window.Radix = Radix;
	} else {
		try{ module.exports = Radix }catch(e){}
	}

	// each(obj, fn): iterate own properties; a non-undefined return from fn
	// short-circuits. Exposed as Radix.object for reuse.
	var each = Radix.object = function(o, f, r){
		for(var k in o){
			if(!o.hasOwnProperty(k)){ continue }
			if((r = f(o[k], k)) !== u){ return r }
		}
	}, no = {}, u;
	// Sort-cache sentinel key: ASCII 24 (CAN), unlikely to collide with data keys.
	var _ = String.fromCharCode(24);

}());
|
||||
@@ -1,79 +0,0 @@
|
||||
;(function(){
/* // from @jabis
	if (navigator.storage && navigator.storage.estimate) {
		const quota = await navigator.storage.estimate();
		// quota.usage -> Number of bytes used.
		// quota.quota -> Maximum number of bytes available.
		const percentageUsed = (quota.usage / quota.quota) * 100;
		console.log(`You've used ${percentageUsed}% of the available storage.`);
		const remaining = quota.quota - quota.usage;
		console.log(`You can write up to ${remaining} more bytes.`);
	}
*/
	// Minimal IndexedDB-backed key/value store used by RAD.
	// Store(opt) -> { get(key, cb), put(key, data, cb), start() }; one instance
	// is memoized per opt.file (database name, default 'radata').
	function Store(opt){
		opt = opt || {};
		opt.file = String(opt.file || 'radata');
		var store = Store[opt.file], db = null, u;

		// Reuse (and warn about) an existing instance for the same file.
		if(store){
			console.log("Warning: reusing same IndexedDB store and options as 1st.");
			return Store[opt.file];
		}
		store = Store[opt.file] = function(){};

		// Resolve an IndexedDB implementation; try{} guards environments where
		// the bare `indexedDB` global does not exist.
		try{opt.indexedDB = opt.indexedDB || Store.indexedDB || indexedDB}catch(e){}
		// No IndexedDB at all (or file:// pages, where it is unreliable):
		// fall back to a volatile in-memory map with async callbacks.
		try{if(!opt.indexedDB || 'file:' == location.protocol){
			var s = store.d || (store.d = {});
			store.put = function(f, d, cb){ s[f] = d; setTimeout(function(){ cb(null, 1) },250) };
			store.get = function(f, cb){ setTimeout(function(){ cb(null, s[f] || u) },5) };
			console.log('Warning: No indexedDB exists to persist data to!');
			return store;
		}}catch(e){}

		// (Re)open the database.
		// FIX: open via the resolved opt.indexedDB instead of the bare
		// `indexedDB` global, so an implementation injected through
		// opt.indexedDB / Store.indexedDB is actually the one used.
		store.start = function(){
			var o = opt.indexedDB.open(opt.file, 1);
			o.onupgradeneeded = function(eve){ (eve.target.result).createObjectStore(opt.file) }
			o.onsuccess = function(){ db = o.result }
			o.onerror = function(eve){ console.log(eve||1); }
		}; store.start();

		// put(key, data, cb) -> cb(err, ok); retries shortly until `db` is ready.
		store.put = function(key, data, cb){
			if(!db){ setTimeout(function(){ store.put(key, data, cb) },1); return }
			var tx = db.transaction([opt.file], 'readwrite');
			var obj = tx.objectStore(opt.file);
			var req = obj.put(data, ''+key);
			req.onsuccess = obj.onsuccess = tx.onsuccess = function(){ cb(null, 1) }
			req.onabort = obj.onabort = tx.onabort = function(eve){ cb(eve||'put.tx.abort') }
			req.onerror = obj.onerror = tx.onerror = function(eve){ cb(eve||'put.tx.error') }
		}

		// get(key, cb) -> cb(err, data); data is undefined when the key is absent.
		store.get = function(key, cb){
			if(!db){ setTimeout(function(){ store.get(key, cb) },9); return }
			var tx = db.transaction([opt.file], 'readonly');
			var obj = tx.objectStore(opt.file);
			var req = obj.get(''+key);
			req.onsuccess = function(){ cb(null, req.result) }
			req.onabort = function(eve){ cb(eve||4) }
			req.onerror = function(eve){ cb(eve||5) }
		}
		// Periodically close & reopen the connection. // reset webkit bug?
		setInterval(function(){ db && db.close(); db = null; store.start() }, 1000 * 15);
		return store;
	}

	// Export for browser (global RindexedDB) or CommonJS; try{} guards ESM.
	if(typeof window !== "undefined"){
		(Store.window = window).RindexedDB = Store;
		Store.indexedDB = window.indexedDB; // safari bug
	} else {
		try{ module.exports = Store }catch(e){}
	}

	// Auto-wire into Gun when available: every Gun instance gets a store.
	try{
		var Gun = Store.window.Gun || require('../gun');
		Gun.on('create', function(root){
			this.to.next(root);
			root.opt.store = root.opt.store || Store(root.opt);
		});
	}catch(e){}

}());
|
||||
1537
assets/static/sea.js
1537
assets/static/sea.js
File diff suppressed because it is too large
Load Diff
7
assets/static/simplemde.min.css
vendored
7
assets/static/simplemde.min.css
vendored
File diff suppressed because one or more lines are too long
15
assets/static/simplemde.min.js
vendored
15
assets/static/simplemde.min.js
vendored
File diff suppressed because one or more lines are too long
@@ -1,150 +0,0 @@
|
||||
var Gun = (typeof window !== "undefined")? window.Gun : require('../gun');

// RAD storage adapter: wires Radisk persistence into Gun's 'put' and 'get'
// wire events. Keys on disk are soul + ESC(27) + property.
Gun.on('create', function(root){
	if(Gun.TESTING){ root.opt.file = 'radatatest' }
	this.to.next(root);
	var opt = root.opt, empty = {}, u;
	if(false === opt.rad || false === opt.radisk){ return } // explicitly disabled.
	if((u+'' != typeof process) && 'false' === ''+(process.env||'').RAD){ return } // env kill-switch (Node).
	var Radisk = (Gun.window && Gun.window.Radisk) || require('./radisk');
	var Radix = Radisk.Radix;
	var dare = Radisk(opt), esc = String.fromCharCode(27); // ESC separates soul from property in disk keys.
	var ST = 0;

	// Persist each written property as an individual {':' value, '>' state} record.
	root.on('put', function(msg){
		this.to.next(msg);
		if((msg._||'').rad){ return } // don't save what just came from a read.
		//if(msg['@']){ return } // WHY DID I NOT ADD THIS?
		var id = msg['#'], put = msg.put, soul = put['#'], key = put['.'], val = put[':'], state = put['>'], tmp;
		var DBG = (msg._||'').DBG; DBG && (DBG.sp = DBG.sp || +new Date); // DBG.* fields are profiling timestamps.
		var S = (msg._||'').RPS || ((msg._||'').RPS = +new Date);
		//dare(soul+esc+key, {':': val, '>': state}, dare.one[id] || function(err, ok){
		dare(soul+esc+key, {':': val, '>': state}, function(err, ok){
			DBG && (DBG.spd = DBG.spd || +new Date);
			console.STAT && console.STAT(S, +new Date - S, 'put');
			if(err){ root.on('in', {'@': id, err: err, DBG: DBG}); return } // ack the write back onto the wire.
			root.on('in', {'@': id, ok: ok, DBG: DBG});
		//}, id, DBG && (DBG.r = DBG.r || {}));
		}, false && id, DBG && (DBG.r = DBG.r || {})); // NOTE(review): `false && id` is always false — per-id ack batching deliberately disabled (see commented line above).
		DBG && (DBG.sps = DBG.sps || +new Date);
	});
	var count = {}, obj_empty = Object.empty;

	// Answer reads from disk: translate Gun lex queries (soul, '.', '*', '>',
	// '<', '%', '-') into a Radisk range read, rebuild a graph, emit it as an ack.
	root.on('get', function(msg){
		this.to.next(msg);
		var ctx = msg._||'', DBG = ctx.DBG = msg.DBG; DBG && (DBG.sg = +new Date);
		var id = msg['#'], get = msg.get, soul = msg.get['#'], has = msg.get['.']||'', o = {}, graph, lex, key, tmp, force;
		if('string' == typeof soul){
			key = soul; // plain soul lookup.
		} else
		if(soul){ // lex query on the soul: '*' prefix, '>' start, '<' end, '=' exact.
			if(u !== (tmp = soul['*'])){ o.limit = force = 1 }
			if(u !== soul['>']){ o.start = soul['>'] }
			if(u !== soul['<']){ o.end = soul['<'] }
			key = force? (''+tmp) : tmp || soul['='];
			force = null;
		}
		if(key && !o.limit){ // a soul.has must be on a soul, and not during soul*
			if('string' == typeof has){
				key = key+esc+(o.atom = has); // exact property read.
			} else
			if(has){ // lex query on the property.
				if(u !== has['>']){ o.start = has['>']; o.limit = 1 }
				if(u !== has['<']){ o.end = has['<']; o.limit = 1 }
				if(u !== (tmp = has['*'])){ o.limit = force = 1 }
				if(key){ key = key+esc + (force? (''+(tmp||'')) : tmp || (o.atom = has['='] || '')) }
			}
		}
		if((tmp = get['%']) || o.limit){ // '%' caps the byte budget of the reply.
			o.limit = (tmp <= (o.pack || (1000 * 100)))? tmp : 1;
		}
		if(has['-'] || (soul||{})['-'] || get['-']){ o.reverse = true }
		// Skip the disk when RAM already has this node/property from a prior read.
		if((tmp = (root.next||'')[soul]) && tmp.put){
			if(o.atom){
				tmp = (tmp.next||'')[o.atom] ;
				if(tmp && tmp.root && tmp.root.graph && tmp.root.graph[soul] && tmp.root.graph[soul][o.atom]){ return }
			} else
			if(tmp && tmp.rad){ return }
		}
		var now = Gun.state();
		var S = (+new Date), C = 0, SPT = 0; // STATS!
		DBG && (DBG.sgm = S);
		dare(key||'', function(err, data, info){
			DBG && (DBG.sgr = +new Date);
			DBG && (DBG.sgi = info);
			try{opt.store.stats.get.time[statg % 50] = (+new Date) - S; ++statg; // rolling 50-sample read-latency window.
				opt.store.stats.get.count++;
				if(err){ opt.store.stats.get.err = err }
			}catch(e){} // STATS!
			console.STAT && console.STAT(S, +new Date - S, 'got', JSON.stringify(key)); S = +new Date;
			info = info || '';
			var va, ve;
			if(info.unit && data && u !== (va = data[':']) && u !== (ve = data['>'])){ // new format: single {':' ,'>'} record.
				var tmp = key.split(esc), so = tmp[0], ha = tmp[1];
				(graph = graph || {})[so] = Gun.state.ify(graph[so], ha, ve, va, so);
				root.$.get(so).get(ha)._.rad = now; // mark as freshly disk-backed.
				// REMEMBER TO ADD _rad TO NODE/SOUL QUERY!
			} else
			if(data){ // old code path: radix subtree or encoded string.
				if(typeof data !== 'string'){
					if(o.atom){
						data = u; // exact-property read got a subtree: treat as miss.
					} else {
						Radix.map(data, each, o); // IS A RADIX TREE, NOT FUNCTION!
					}
				}
				if(!graph && data){ each(data, '') }
				// TODO: !has what about soul lookups?
				if(!o.atom && !has & 'string' == typeof soul && !o.limit && !o.more){ // NOTE(review): `!has & ...` is bitwise &, not && — both operands are booleans so the result happens to match &&; confirm intent.
					root.$.get(soul)._.rad = now;
				}
			}
			DBG && (DBG.sgp = +new Date);
			// TODO: PERF NOTES! This is like 0.2s, but for each ack, or all? Can you cache these preps?
			// Or benchmark by reusing first start date.
			if(console.STAT && (ST = +new Date - S) > 9){ console.STAT(S, ST, 'got prep time'); console.STAT(S, C, 'got prep #') } SPT += ST; C = 0; S = +new Date;
			var faith = function(){}; faith.faith = true; faith.rad = get; // HNPERF: We're testing performance improvement by skipping going through security again, but this should be audited.
			root.on('in', {'@': id, put: graph, '%': info.more? 1 : u, err: err? err : u, _: faith, DBG: DBG});
			console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'got emit', Object.keys(graph||{}).length);
			graph = u; // each is outside our scope, we have to reset graph to nothing!
		}, o, DBG && (DBG.r = DBG.r || {}));
		DBG && (DBG.sgd = +new Date);
		console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'get call'); // TODO: Perf: this was half a second??????
		// Fold one disk record into `graph`; returning true stops the range scan
		// (limit reached). `val` is either a decoded record or an encoded string.
		function each(val, has, a,b){ // TODO: THIS CODE NEEDS TO BE FASTER!!!!
			C++;
			if(!val){ return }
			has = (key+has).split(esc);
			var soul = has.slice(0,1)[0];
			has = has.slice(-1)[0];
			if(o.limit && o.limit <= o.count){ return true } // budget exhausted: stop scanning.
			var va, ve, so = soul, ha = has;
			//if(u !== (va = val[':']) && u !== (ve = val['>'])){ // THIS HANDLES NEW CODE!
			if('string' != typeof val){ // THIS HANDLES NEW CODE!
				va = val[':']; ve = val['>'];
				(graph = graph || {})[so] = Gun.state.ify(graph[so], ha, ve, va, so);
				//root.$.get(so).get(ha)._.rad = now;
				o.count = (o.count || 0) + ((va||'').length || 9); // rough byte accounting (9 ~ non-string value).
				return;
			}
			o.count = (o.count || 0) + val.length;
			// Old string format: value '>' state, each Radisk-encoded.
			var tmp = val.lastIndexOf('>');
			var state = Radisk.decode(val.slice(tmp+1), null, esc);
			val = Radisk.decode(val.slice(0,tmp), null, esc);
			(graph = graph || {})[soul] = Gun.state.ify(graph[soul], has, state, val, soul);
		}
	});
	var val_is = Gun.valid;
	(opt.store||{}).stats = {get:{time:{}, count:0}, put: {time:{}, count:0}}; // STATS!
	var statg = 0, statp = 0; // STATS!
});
|
||||
@@ -1,58 +0,0 @@
|
||||
(function (env) {
	// Resolve the Gun constructor for both CommonJS and browser environments.
	var Gun;
	if(typeof module !== "undefined" && module.exports){ Gun = require('gun/gun') }
	if(typeof window !== "undefined"){ Gun = window.Gun }

	// gun.sync(obj, [opt], [cb]): mirror this chain's node into the plain
	// object `obj`, recursing into linked child nodes and keeping `obj`
	// updated on every change. opt.meta copies '_'/'#' metadata too; opt.ctx
	// tracks subscriptions so each soul+field pair is only synced once.
	// cb(rootObj) fires after each batch of changes. Returns the chain.
	Gun.chain.sync = function (obj, opt, cb, o) {
		var chain = this;
		if (!Gun.obj.is(obj)) {
			console.log('First param is not an object');
			return chain;
		}
		// Boolean second arg is shorthand for {meta: bool}; function means "no opt, just cb".
		if (Gun.bi.is(opt)) { opt = { meta: opt } }
		if (Gun.fn.is(opt)) {
			cb = opt;
			opt = null;
		}
		opt = opt || {};
		cb = cb || function(){};
		opt.ctx = opt.ctx || {};
		chain.on(function (change, key) {
			Gun.obj.map(change, function (value, name) {
				if (!obj) { return }
				if (name === '_' || name === '#') {
					// Metadata fields only copied when explicitly requested.
					if (opt.meta) { obj[name] = value }
					return;
				}
				if (!Gun.obj.is(value)) {
					obj[name] = value; // primitive: copy straight across.
					return;
				}
				// Link to a child node: subscribe once per soul+field.
				var soul = Gun.val.rel.is(value);
				if (opt.ctx[soul + name]) { return } // don't re-subscribe.
				opt.ctx[soul + name] = true;        // unique subscribe!
				this.path(name).sync(
					obj[name] = (obj[name] || {}),
					Gun.obj.copy(opt),
					cb,
					o || obj // `o` threads the root object down the recursion.
				);
			}, this);
			cb(o || obj);
		});
		return chain;
	};

}());
|
||||
@@ -1,134 +0,0 @@
|
||||
;(function(){
	// WebRTC transport adapter for Gun: peers discover each other through a
	// shared '/RTC/<room>' node on existing relays, then exchange SDP offers/
	// answers and ICE candidates over Gun's DAM mesh ('rtc' messages).
	var GUN = (typeof window !== "undefined")? window.Gun : require('../gun');
	GUN.on('opt', function(root){
		this.to.next(root);
		var opt = root.opt;
		if(root.once){ return }
		if(!GUN.Mesh){ return }
		if(false === opt.RTCPeerConnection){ return } // explicitly disabled.

		var env;
		if(typeof window !== "undefined"){ env = window }
		if(typeof global !== "undefined"){ env = global }
		env = env || {};

		// Resolve the WebRTC constructors, including legacy vendor prefixes.
		var rtcpc = opt.RTCPeerConnection || env.RTCPeerConnection || env.webkitRTCPeerConnection || env.mozRTCPeerConnection;
		var rtcsd = opt.RTCSessionDescription || env.RTCSessionDescription || env.webkitRTCSessionDescription || env.mozRTCSessionDescription;
		var rtcic = opt.RTCIceCandidate || env.RTCIceCandidate || env.webkitRTCIceCandidate || env.mozRTCIceCandidate;
		if(!rtcpc || !rtcsd || !rtcic){ return } // no WebRTC support in this environment.
		opt.RTCPeerConnection = rtcpc;
		opt.RTCSessionDescription = rtcsd;
		opt.RTCIceCandidate = rtcic;
		opt.rtc = opt.rtc || {'iceServers': [
			{urls: 'stun:stun.l.google.com:19302'},
			{urls: 'stun:stun.cloudflare.com:3478'}/*,
			{urls: "stun:stun.sipgate.net:3478"},
			{urls: "stun:stun.stunprotocol.org"},
			{urls: "stun:stun.sipgate.net:10000"},
			{urls: "stun:217.10.68.152:10000"},
			{urls: 'stun:stun.services.mozilla.com'}*/
		]};
		// TODO: Select the most appropriate stuns.
		// FIXME: Find the wire throwing ICE Failed
		// The above change corrects at least firefox RTC Peer handler where it **throws** on over 6 ice servers, and updates url: to urls: removing deprecation warning
		opt.rtc.dataChannel = opt.rtc.dataChannel || {ordered: false, maxRetransmits: 2};
		opt.rtc.sdp = opt.rtc.sdp || {mandatory: {OfferToReceiveAudio: false, OfferToReceiveVideo: false}};
		opt.rtc.max = opt.rtc.max || 55; // is this a magic number? // For Future WebRTC notes: Chrome 500 max limit, however 256 likely - FF "none", webtorrent does 55 per torrent.
		opt.rtc.room = opt.rtc.room || GUN.window && (window.rtcRoom || location.hash.slice(1) || location.pathname.slice(1));
		// Announce our presence in the room node; replies and later writers are
		// handed to plan() to start signaling with them.
		opt.announce = function(to){
			opt.rtc.start = +new Date; // handle room logic:
			root.$.get('/RTC/'+opt.rtc.room+'<?99').get('+').put(opt.pid, function(ack){
				if(!ack.ok || !ack.ok.rtc){ return }
				plan(ack);
			}, {acks: opt.rtc.max}).on(function(last,key, msg){
				// Ignore our own announcement and any write older than our start.
				if(last === opt.pid || opt.rtc.start > msg.put['>']){ return }
				plan({'#': ''+msg['#'], ok: {rtc: {id: last}}});
			});
		};

		var mesh = opt.mesh = opt.mesh || GUN.Mesh(root), wired = mesh.wire;
		mesh.hear['rtc'] = plan; // incoming DAM 'rtc' signaling messages.
		// Wrap mesh.wire so that adding a MediaStream re-offers it to every peer.
		mesh.wire = function(media){ try{ wired && wired(media);
			if(!(media instanceof MediaStream)){ return }
			(open.media = open.media||{})[media.id] = media;
			for(var p in opt.peers){ p = opt.peers[p]||'';
				p.addTrack && media.getTracks().forEach(track => {
					p.addTrack(track, media);
				});
				p.createOffer && p.createOffer(function(offer){
					p.setLocalDescription(offer);
					mesh.say({'#': root.ask(plan), dam: 'rtc', ok: {rtc: {offer: offer, id: opt.pid}}}, p);
				}, function(){}, opt.rtc.sdp);
			}
		} catch(e){console.log(e)} }
		root.on('create', function(at){
			this.to.next(at);
			setTimeout(opt.announce, 1); // announce once the instance exists.
		});

		// Signaling dispatcher: handles announcements, ICE candidates, SDP
		// answers, and SDP offers carried in msg.ok.rtc.
		function plan(msg){
			if(!msg.ok){ return }
			var rtc = msg.ok.rtc, peer, tmp;
			if(!rtc || !rtc.id || rtc.id === opt.pid){ return } // ignore self.
			peer = open(msg, rtc);
			if(tmp = rtc.candidate){
				return peer.addIceCandidate(new opt.RTCIceCandidate(tmp));
			}
			if(tmp = rtc.answer){
				tmp.sdp = tmp.sdp.replace(/\\r\\n/g, '\r\n'); // un-escape CRLFs mangled in transit.
				return peer.setRemoteDescription(peer.remoteSet = new opt.RTCSessionDescription(tmp));
			}
			if(tmp = rtc.offer){
				rtc.offer.sdp = rtc.offer.sdp.replace(/\\r\\n/g, '\r\n');
				peer.setRemoteDescription(new opt.RTCSessionDescription(tmp));
				return peer.createAnswer(function(answer){
					peer.setLocalDescription(answer);
					root.on('out', {'@': msg['#'], ok: {rtc: {answer: answer, id: opt.pid}}});
				}, function(){}, opt.rtc.sdp);
			}
		}
		// Create (or reuse) the RTCPeerConnection + data channel for rtc.id,
		// wire its events into the mesh, and kick off an offer if we initiate.
		function open(msg, rtc, peer){
			if(peer = opt.peers[rtc.id] || open[rtc.id]){ return peer } // already connected or pending.
			(peer = new opt.RTCPeerConnection(opt.rtc)).id = rtc.id;
			var wire = peer.wire = peer.createDataChannel('dc', opt.rtc.dataChannel);
			// NOTE(review): `gun` is not defined anywhere in this scope (only
			// `root`/`root.$` are) — rtceve and `peer.$ = gun` look like they
			// would throw ReferenceError when reached; confirm against upstream.
			function rtceve(eve){ eve.peer = peer; gun.on('rtc', eve) }
			peer.$ = gun;
			open[rtc.id] = peer;
			peer.ontrack = rtceve;
			peer.onremovetrack = rtceve;
			peer.onconnectionstatechange = rtceve;
			wire.to = setTimeout(function(){delete open[rtc.id]},1000*60); // drop stale pending peers after 1 min.
			wire.onclose = function(){ mesh.bye(peer) };
			wire.onerror = function(err){ };
			wire.onopen = function(e){
				delete open[rtc.id];
				mesh.hi(peer); // hand the live channel to the mesh.
			}
			wire.onmessage = function(msg){
				if(!msg){ return }
				mesh.hear(msg.data || msg, peer);
			};
			peer.onicecandidate = function(e){ rtceve(e);
				if(!e.candidate){ return }
				root.on('out', {'@': (msg||'')['#'], '#': root.ask(plan), ok: {rtc: {candidate: e.candidate, id: opt.pid}}});
			}
			// Remote-initiated channel: reuse the same handlers.
			peer.ondatachannel = function(e){ rtceve(e);
				var rc = e.channel;
				rc.onmessage = wire.onmessage;
				rc.onopen = wire.onopen;
				rc.onclose = wire.onclose;
			}
			if(rtc.offer){ return peer } // answering side: the offer path above drives signaling.
			// Initiating side: attach any known media, then send our offer.
			for(var m in open.media){ m = open.media[m];
				m.getTracks().forEach(track => {
					peer.addTrack(track, m);
				});
			}
			peer.createOffer(function(offer){
				peer.setLocalDescription(offer);
				root.on('out', {'@': (msg||'')['#'], '#': root.ask(plan), ok: {rtc: {offer: offer, id: opt.pid}}});
			}, function(){}, opt.rtc.sdp);
			return peer;
		}
	});
}());
|
||||
@@ -1,244 +0,0 @@
|
||||
;(function(){
	// JSON: JavaScript Object Notation
	// YSON: Yielding javaScript Object Notation
	// Incremental JSON parse/stringify that yields to the event loop every M
	// characters (parse) or every few frames (stringify), so huge documents
	// don't block the main thread. Installed as JSON.parseAsync/stringifyAsync.
	var yson = {}, u, sI = setTimeout.turn || (typeof setImmediate != ''+u && setImmediate) || setTimeout;

	// parseAsync(text, done, revive, M): done(err, value). `revive` is accepted
	// but unused; M is the per-tick character budget (default 32K).
	// Parser state lives in ctx: i (cursor), at (current container), up (container
	// stack), s (pending key/string), a (pending scalar flag), sl/ai (slice starts),
	// w (inside a string literal).
	yson.parseAsync = function(text, done, revive, M){
		// Non-string input: defer to native JSON.parse as a best effort.
		if('string' != typeof text){ try{ done(u,JSON.parse(text)) }catch(e){ done(e) } return }
		var ctx = {i: 0, text: text, done: done, l: text.length, up: []};
		//M = 1024 * 1024 * 100;
		//M = M || 1024 * 64;
		M = M || 1024 * 32;
		parse();
		function parse(){
			//var S = +new Date;
			var s = ctx.text;
			var i = ctx.i, l = ctx.l, j = 0;
			var w = ctx.w, b, tmp;
			while(j++ < M){ // scan at most M chars this tick.
				var c = s[i++];
				if(i > l){
					ctx.end = true;
					break;
				}
				if(w){ // inside a string: jump to the next quote, honoring backslash escapes.
					i = s.indexOf('"', i-1); c = s[i];
					// Count trailing backslashes: an odd run means the quote is escaped.
					tmp = 0; while('\\' == s[i-(++tmp)]){}; tmp = !(tmp % 2);//tmp = ('\\' == s[i-1]); // json is stupid
					b = b || tmp; // remember the string contains an escape somewhere.
					if('"' == c && !tmp){ // real closing quote.
						w = u;
						tmp = ctx.s;
						if(ctx.a){ // string is a VALUE: store it under the pending key / push to array.
							tmp = s.slice(ctx.sl, i);
							if(b || (1+tmp.indexOf('\\'))){ tmp = JSON.parse('"'+tmp+'"') } // escape + unicode :( handling
							if(ctx.at instanceof Array){
								ctx.at.push(ctx.s = tmp);
							} else {
								if(!ctx.at){ ctx.end = j = M; tmp = u } // malformed: bail out.
								(ctx.at||{})[ctx.s] = ctx.s = tmp;
							}
							ctx.s = u;
						} else { // string is a KEY: remember it until ':' arrives.
							ctx.s = s.slice(ctx.sl, i);
							if(b || (1+ctx.s.indexOf('\\'))){ ctx.s = JSON.parse('"'+ctx.s+'"'); } // escape + unicode :( handling
						}
						ctx.a = b = u;
					}
					++i;
				} else {
					switch(c){
						case '"': // string start: remember where the content begins.
							ctx.sl = i;
							w = true;
							break;
						case ':': // next token is a value for the pending key.
							ctx.ai = i;
							ctx.a = true;
							break;
						case ',': // flush a pending bare scalar (number/bool/null), if any.
							if(ctx.a || ctx.at instanceof Array){
								if(tmp = s.slice(ctx.ai, i-1)){
									if(u !== (tmp = value(tmp))){
										if(ctx.at instanceof Array){
											ctx.at.push(tmp);
										} else {
											ctx.at[ctx.s] = tmp;
										}
									}
								}
							}
							ctx.a = u;
							if(ctx.at instanceof Array){ // in arrays a new element starts right away.
								ctx.a = true;
								ctx.ai = i;
							}
							break;
						case '{': // open object: push parent, descend.
							ctx.up.push(ctx.at||(ctx.at = {}));
							if(ctx.at instanceof Array){
								ctx.at.push(ctx.at = {});
							} else
							if(u !== (tmp = ctx.s)){
								ctx.at[tmp] = ctx.at = {};
							}
							ctx.a = u;
							break;
						case '}': // close object: flush pending scalar, pop parent.
							if(ctx.a){
								if(tmp = s.slice(ctx.ai, i-1)){
									if(u !== (tmp = value(tmp))){
										if(ctx.at instanceof Array){
											ctx.at.push(tmp);
										} else {
											if(!ctx.at){ ctx.end = j = M; tmp = u } // malformed: bail out.
											(ctx.at||{})[ctx.s] = tmp;
										}
									}
								}
							}
							ctx.a = u;
							ctx.at = ctx.up.pop();
							break;
						case '[': // open array: push parent, descend.
							if(u !== (tmp = ctx.s)){
								ctx.up.push(ctx.at);
								ctx.at[tmp] = ctx.at = [];
							} else
							if(!ctx.at){
								ctx.up.push(ctx.at = []);
							}
							ctx.a = true;
							ctx.ai = i;
							break;
						case ']': // close array: flush pending scalar, pop parent.
							if(ctx.a){
								if(tmp = s.slice(ctx.ai, i-1)){
									if(u !== (tmp = value(tmp))){
										if(ctx.at instanceof Array){
											ctx.at.push(tmp);
										} else {
											ctx.at[ctx.s] = tmp;
										}
									}
								}
							}
							ctx.a = u;
							ctx.at = ctx.up.pop();
							break;
					}
				}
			}
			// Save cursor state, then either finish or yield and continue next tick.
			ctx.s = u;
			ctx.i = i;
			ctx.w = w;
			if(ctx.end){
				tmp = ctx.at;
				if(u === tmp){ // nothing parsed incrementally: fall back to native parse.
					try{ tmp = JSON.parse(text)
					}catch(e){ return ctx.done(e) }
				}
				ctx.done(u, tmp);
			} else {
				sI(parse);
			}
		}
	}
	// Interpret a bare (unquoted) JSON token: number, true, false, null;
	// undefined for anything else (whitespace, garbage).
	function value(s){
		var n = parseFloat(s);
		if(!isNaN(n)){
			return n;
		}
		s = s.trim();
		if('true' == s){
			return true;
		}
		if('false' == s){
			return false;
		}
		if('null' == s){
			return null;
		}
	}

	// stringifyAsync(data, done, replacer, space, ctx): done(err, text).
	// `replacer`/`space` are accepted but unused. Iterative (explicit stack in
	// ctx.up) so deep structures neither recurse nor block: each frame tracks
	// as (container), i (index), l (length), ok (sorted keys for objects),
	// j (skipped entries, to suppress stray commas), k (current key), d (current datum).
	yson.stringifyAsync = function(data, done, replacer, space, ctx){
		//try{done(u, JSON.stringify(data, replacer, space))}catch(e){done(e)}return;
		ctx = ctx || {};
		ctx.text = ctx.text || "";
		ctx.up = [ctx.at = {d: data}];
		ctx.done = done;
		ctx.i = 0;
		var j = 0;
		ify();
		function ify(){
			var at = ctx.at, data = at.d, add = '', tmp;
			if(at.i && (at.i - at.j) > 0){ add += ',' } // separator, unless everything before was skipped.
			if(u !== (tmp = at.k)){ add += JSON.stringify(tmp) + ':' } //'"'+tmp+'":' } // only if backslash
			switch(typeof data){
				case 'boolean':
					add += ''+data;
					break;
				case 'string':
					add += JSON.stringify(data); //ctx.text += '"'+data+'"';//JSON.stringify(data); // only if backslash
					break;
				case 'number':
					add += (isNaN(data)? 'null' : data); // NaN serializes as null, like JSON.stringify.
					break;
				case 'object':
					if(!data){
						add += 'null';
						break;
					}
					if(data instanceof Array){ // descend into array: push a new frame.
						add += '[';
						at = {i: -1, as: data, up: at, j: 0};
						at.l = data.length;
						ctx.up.push(ctx.at = at);
						break;
					}
					if('function' != typeof (data||'').toJSON){ // plain object: keys emitted sorted.
						add += '{';
						at = {i: -1, ok: Object.keys(data).sort(), as: data, up: at, j: 0};
						at.l = at.ok.length;
						ctx.up.push(ctx.at = at);
						break;
					}
					if(tmp = data.toJSON()){ // objects with toJSON serialize via it.
						add += tmp;
						break;
					}
					// let this & below pass into default case...
				case 'function':
					if(at.as instanceof Array){
						add += 'null'; // functions inside arrays become null, like JSON.stringify.
						break;
					}
				default: // handle wrongly added leading `,` if previous item not JSON-able.
					add = '';
					at.j++;
			}
			ctx.text += add;
			// Close every container we've reached the end of.
			while(1+at.i >= at.l){
				ctx.text += (at.ok? '}' : ']');
				at = ctx.at = at.up;
			}
			if(++at.i < at.l){ // advance to the next entry of the current frame.
				if(tmp = at.ok){
					at.d = at.as[at.k = tmp[at.i]];
				} else {
					at.d = at.as[at.i];
				}
				if(++j < 9){ return ify() } else { j = 0 } // batch 9 entries per tick, then yield.
				sI(ify);
				return;
			}
			ctx.done(u, ctx.text); // stack fully unwound: emit the result.
		}
	}
	// Export globally and patch the native JSON object with the async variants.
	if(typeof window != ''+u){ window.YSON = yson }
	try{ if(typeof module != ''+u){ module.exports = yson } }catch(e){}
	if(typeof JSON != ''+u){
		JSON.parseAsync = yson.parseAsync;
		JSON.stringifyAsync = yson.stringifyAsync;
	}

}());
|
||||
Reference in New Issue
Block a user