Bugfix: Import traceback for SQL sharelogger
[bitcoin:eloipool.git] / eloipool.py
1 #!/usr/bin/python3
2 # Eloipool - Python Bitcoin pool server
3 # Copyright (C) 2011-2012  Luke Dashjr <luke-jr+eloipool@utopios.org>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as
7 # published by the Free Software Foundation, either version 3 of the
8 # License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
17
import config

# Backfill optional configuration attributes with defaults so the rest of
# the file can assume they exist.
if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

if not hasattr(config, 'ShareTarget'):
	# Default (easiest) share target
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
26
import logging

# Install a default logging configuration only if the embedding application
# (or an earlier import) has not already set one up.
if len(logging.root.handlers) == 0:
	logging.basicConfig(
		format='%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s',
		level=logging.DEBUG,
	)
	# These loggers are too chatty at DEBUG; cap them at INFO by default.
	for infoOnly in ('checkShare', 'JSONRPCHandler', 'merkleMaker', 'Waker for JSONRPCServer', 'JSONRPCServer'):
		logging.getLogger(infoOnly).setLevel(logging.INFO)
36
def RaiseRedFlags(reason):
	"""Log *reason* as a critical 'redflag' event and return it unchanged.

	Returning the message lets callers embed this in an expression, e.g.
	``raise ValueError(RaiseRedFlags(msg))``.
	"""
	redflag = logging.getLogger('redflag')
	redflag.critical(reason)
	return reason
40
41
from bitcoin.node import BitcoinLink, BitcoinNode
# Peer-to-peer Bitcoin node, used (among other things) to announce solved blocks.
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'

import jsonrpc
# JSON-RPC proxy to the upstream bitcoind (submitblock/getmemorypool, etc).
UpstreamBitcoindJSONRPC = jsonrpc.ServiceProxy(config.UpstreamURI)
48
49
50 from bitcoin.script import BitcoinScript
51 from bitcoin.txn import Txn
52 from base58 import b58decode
53 from struct import pack
54 import subprocess
55 from time import time
56
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
	"""Build the coinbase transaction paying out coinbaseValue (base units).

	If config.CoinbaserCmd is set (and useCoinbaser is true), run it with %d
	replaced by the total value; its stdout must give a count of outputs,
	then an (amount, address) pair per output, one value per line.  Whatever
	the coinbaser does not claim goes to config.TrackerAddr.  If the
	coinbaser misbehaves in any way, ALL of its outputs are discarded.
	Returns an unassembled Txn (caller must set the coinbase data).
	"""
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except Exception:
			# Previously this failure was swallowed silently; log it, then
			# force the sanity check below to fail so the coinbaser's
			# outputs are dropped.
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser error:\n' + traceback.format_exc())
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO
	# TODO: red flag on dupe coinbase
	return txn
87
88
import jsonrpc_getwork
from util import Bits2Target

# username -> { work-log-id -> (work data tuple, issue time) }
workLog = {}
# username -> [current target, window start time, work counted] (dynamic targetting)
userStatus = {}
# Current block's proof-of-work target; None until the first template arrives
networkTarget = None
# Header data already seen this block, used to reject duplicate shares
DupeShareHACK = {}

# JSON-RPC server instance; assigned in the __main__ section below
server = None
def updateBlocks():
	"""Wake longpoll connections so miners fetch fresh work (merkle maker callback)."""
	server.wakeLongpoll()
100
def blockChanged():
	"""Merkle maker callback for a new best block: reset per-block state.

	Clears the duplicate-share tables and the work log, recomputes the
	network target from the new block's nBits, and wakes longpollers with a
	request to send cleared work.
	"""
	global DupeShareHACK, MM, networkTarget, server
	DupeShareHACK = {}
	jsonrpc_getwork._CheckForDupesHACK = {}
	bits = MM.currentBlock[2]
	networkTarget = None if bits is None else Bits2Target(bits)
	workLog.clear()
	server.wakeLongpoll(wantClear=True)
113
114
115 from time import sleep, time
116 import traceback
117
118 def _WorkLogPruner_I(wl):
119         now = time()
120         pruned = 0
121         for username in wl:
122                 userwork = wl[username]
123                 for wli in tuple(userwork.keys()):
124                         if now > userwork[wli][1] + 120:
125                                 del userwork[wli]
126                                 pruned += 1
127         WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))
128
129 def WorkLogPruner(wl):
130         while True:
131                 try:
132                         sleep(60)
133                         _WorkLogPruner_I(wl)
134                 except:
135                         WorkLogPruner.logger.error(traceback.format_exc())
136 WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
137
138
from merklemaker import merkleMaker
# The merkle maker thread tracks the upstream block template and builds work.
MM = merkleMaker()
# Copy every configuration attribute onto the merkle maker instance.
MM.__dict__.update(config.__dict__)
# Pre-assembled fallback coinbase txn with no coinbaser outputs.
# FIXME: block subsidy is hardcoded here.
MM.clearCoinbaseTxn = makeCoinbaseTxn(5000000000, False)  # FIXME
MM.clearCoinbaseTxn.assemble()
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
147
148
149 from binascii import b2a_hex
150 from copy import deepcopy
151 from math import log
152 from merklemaker import MakeBlockHeader
153 from struct import pack, unpack
154 import threading
155 from time import time
156 from util import RejectedShare, dblsha, hash2int, swap32, target2pdiff
157 import jsonrpc
158 import traceback
159
# Optional JSON-RPC service notified of block-worthy proofs of work
gotwork = None
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
else:
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	# Scale the configured shares-per-minute goal to shares-per-window
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
170
def submitGotwork(info):
	"""Forward a block-worthy proof of work to the configured gotwork service.

	Failures are logged but never propagated (this runs on a daemon thread).
	"""
	try:
		gotwork.gotwork(info)
	except BaseException:
		msg = 'Failed to submit gotwork\n' + traceback.format_exc()
		checkShare.logger.warning(msg)
176
def getTarget(username, now):
	"""Return the per-user share target, or None meaning config.ShareTarget.

	Implements dynamic targetting: counts the work each user submits within
	config.DynamicTargetWindow seconds and rescales their target toward
	config.DynamicTargetGoal shares per window.
	"""
	if not config.DynamicTargetting:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# First sight of this user: start on the minimum (easiest) target
		userStatus[username] = [None, now, 0]
		return None
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			# Still within the window and goal not exceeded: keep current target
			return targetIn
		if not work:
			# Window expired with no shares at all: reset to minimum target
			if targetIn:
				# BUGFIX: the username argument was missing from this format
				# string, so the literal "%s" was logged.
				getTarget.logger.debug("No shares from '%s', resetting to minimum target" % (username,))
				userStatus[username] = [None, now, 0]
			return None
	
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	# Rescale proportionally to how far off the goal rate this user is
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	if target >= config.ShareTarget:
		target = None
	else:
		if target < networkTarget:
			# Never demand shares harder than an actual block
			target = networkTarget
		if config.DynamicTargetting == 2:
			# Round target to a power of two :)
			target = 2**int(log(target, 2) + 1) - 1
		if target == config.ShareTarget:
			target = None
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s   to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
217
def RegisterWork(username, wli, wld):
	"""Record issued work under (username, wli) and return its share target.

	The per-user target is appended to the work data so checkShare can
	validate against the target that was in force at issue time.
	"""
	now = time()
	target = getTarget(username, now)
	entry = (tuple(wld) + (target,), now)
	workLog.setdefault(username, {})[wli] = entry
	return target or config.ShareTarget
224
def getBlockHeader(username):
	"""Issue getwork-style (MRD) work: returns (header, work-log entry, target)."""
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	# RegisterWork stores the work-log entry itself; the direct workLog
	# assignment that used to be here was redundant (same key, immediately
	# overwritten by RegisterWork).
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
232
def getBlockTemplate(username, p_magic = None):
	"""Issue GBT-style (MC) work: returns (MC data, work-log entry, target).

	p_magic, if given, is a one-element list; p_magic[0] is set True when
	the user is unknown and cleared (transactionless) work is forced.
	"""
	wantClear = False
	if server.tls.wantClear:
		wantClear = True
	elif p_magic and username not in workLog:
		p_magic[0] = True
		wantClear = True
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	# The work-log id is embedded in the coinbase script right after the
	# initial data push (whose length is coinbase[0]).
	idPos = coinbase[0] + 2
	idLen = coinbase[idPos - 1]
	wli = coinbase[idPos:idPos + idLen]
	target = RegisterWork(username, wli, MC)
	return (MC, workLog[username][wli], target)
248
# Active share loggers; populated in __main__ from config.ShareLogging
loggersShare = []

# Debug records of blocks we attempted to submit (data and payloads)
RBDs = []
RBPs = []

from bitcoin.varlen import varlenEncode, varlenDecode
import bitcoin.txn
from merklemaker import assembleBlock

# Records of failed or rejected upstream block submissions
RBFs = []
def blockSubmissionThread(payload, blkhash, share):
	"""Thread target: submit a solved block upstream, retrying until it is
	accepted, rejected, or obsoleted by the next block."""
	# Identify our block by its hash and its prevBlock field so we can tell
	# when the network has moved past it
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	nexterr = 0
	gmperr = None
	while True:
		try:
			# Try BIP 22 submitblock first
			rv = UpstreamBitcoindJSONRPC.submitblock(payload)
			break
		except BaseException as gbterr:
			try:
				# Fall back to the older getmemorypool submission form,
				# normalizing its boolean result to submitblock semantics
				rv = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				if rv is True:
					rv = None
				elif rv is False:
					rv = 'rejected'
				break
			except BaseException as e2:
				gmperr = e2
			now = time()
			# Rate-limit the red-flag reports to one per 5 seconds
			if now > nexterr:
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(traceback.format_exc())
				nexterr = now + 5
			# Give up once the current block is no longer ours or our parent
			if MM.currentBlock[0] not in myblock:
				RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
				RaiseRedFlags('Giving up on submitting block upstream')
				return
	if rv:
		# Any truthy result is a rejection reason
		# FIXME: The returned value could be a list of multiple responses
		RBFs.append( (('upstream reject', rv, time()), payload, blkhash, share) )
		RaiseRedFlags('Upstream block submission failed: %s' % (rv,))
291
def checkShare(share):
	"""Validate a submitted share in-place; raise RejectedShare on failure.

	share is a dict with at least 'data' (block header bytes) and
	'username'; GBT-style shares also carry 'blkdata' (transaction data).
	On the way through, bookkeeping keys ('time', 'issuetime', 'target',
	'upstreamResult', ...) are added to the share dict.  Block-worthy
	shares are submitted upstream as a side effect.
	"""
	shareTime = share['time'] = time()
	
	data = share['data']
	data = data[:80]
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		# A share built on the previous block is merely stale; anything
		# else is outright invalid
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	# TODO: use userid
	username = share['username']
	if username not in workLog:
		raise RejectedShare('unknown-user')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
	
	shareMerkleRoot = data[36:68]
	if 'blkdata' in share:
		# GBT-style share: recover the work-log id embedded in the
		# coinbase script (after the initial data push)
		pl = share['blkdata']
		(txncount, pl) = varlenDecode(pl)
		cbtxn = bitcoin.txn.Txn(pl)
		othertxndata = cbtxn.disassemble(retExtra=True)
		coinbase = cbtxn.getCoinbase()
		wliPos = coinbase[0] + 2
		wliLen = coinbase[wliPos - 1]
		wli = coinbase[wliPos:wliPos+wliLen]
		mode = 'MC'
		moden = 1
	else:
		# getwork-style share: the merkle root itself identifies the work
		wli = shareMerkleRoot
		mode = 'MRD'
		moden = 0
		coinbase = None
	
	MWL = workLog[username]
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	
	share['issuetime'] = issueT
	
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	blkhash = dblsha(data)
	if blkhash[28:] != b'\0\0\0\0':
		# Last four bytes of the hash must be zero (minimum difficulty)
		raise RejectedShare('H-not-zero')
	blkhashn = hash2int(blkhash)
	
	global networkTarget
	# Block-worthy hashes are logged at INFO, ordinary shares at DEBUG
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
	
	workMerkleTree = wld[1]
	workCoinbase = wld[2]
	# RegisterWork appends the per-user target as the 7th element
	workTarget = wld[6] if len(wld) > 6 else None
	
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(coinbase or workCoinbase)
	cbtxn.assemble()
	
	if blkhashn <= networkTarget:
		# This share solves the actual block: assemble and submit it
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data']
			if len(othertxndata):
				payload += share['blkdata']
			else:
				payload += assembleBlock(data, txlist)[80:]
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		RBPs.append(payload)
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		bcnode.submitBlock(payload)
		share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	
	# Gotwork hack...
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			# Build coinbase txn + hash + merkle steps blob for the service
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	# Header nTime sanity checks relative to issue and receipt times
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			# Credit proportional work for shares at a different target
			userStatus[username][2] += float(target) / workTarget
	
	if moden:
		# Extra validation for GBT-style shares
		cbpre = workCoinbase
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
		
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
		
		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')
		
		if len(othertxndata):
			# Miner-supplied txn data must match exactly what we issued
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
452
def receiveShare(share):
	"""Validate an incoming share and pass it to every share logger.

	RejectedShare propagates to the caller after the reject reason is
	recorded on the share; every share (accepted or not) is logged.
	"""
	# TODO: username => userid
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	finally:
		if '_origdata' in share:
			solution = share['_origdata']
		else:
			solution = b2a_hex(swap32(share['data'])).decode('utf8')
		share['solution'] = solution
		for shareLogger in loggersShare:
			shareLogger.logShare(share)
467
def newBlockNotification():
	"""Handle an external new-block notification by refreshing the merkle tree."""
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?
473
def newBlockNotificationSIGNAL(signum, frame):
	"""Signal handler: run newBlockNotification on a fresh daemon thread.

	A separate thread is used in case the signal handler is called with
	locks held.
	"""
	notifier = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
	notifier.daemon = True
	notifier.start()
479
from signal import signal, SIGUSR1
# SIGUSR1 triggers a merkle tree refresh (e.g. from a blocknotify script)
signal(SIGUSR1, newBlockNotificationSIGNAL)
482
483
import os
import os.path
import pickle
import signal
import sys
from time import sleep
import traceback

# File used to persist the work log and dupe-share table across restarts
SAVE_STATE_FILENAME = 'eloipool.worklog'
493
def stopServers():
	"""Shut down the bitcoin node and JSON-RPC servers, then close their fds.

	Safe to call more than once; only the first call does anything.
	"""
	logger = logging.getLogger('stopServers')
	
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server)
	# Ask each server loop to exit...
	for s in servers:
		s.keepgoing = False
	# ...then wake them so they notice
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	# Poll for the server loops to finish (up to 0x100 * 10ms)
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	# Close any file descriptors the servers still hold
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)
529
def stopLoggers():
	"""Give every share logger that supports it a chance to shut down cleanly."""
	for shareLogger in loggersShare:
		stop = getattr(shareLogger, 'stop', None)
		if stop is not None:
			stop()
534
def saveState(t = None):
	"""Persist the resume timestamp, dupe-share table and work log to disk.

	t is the shutdown timestamp, used on restore to detect expired work.
	Retries on failure, giving up after 0x10000 attempts and removing any
	partially-written state file.
	"""
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	i = 0
	while True:
		try:
			with open(SAVE_STATE_FILENAME, 'wb') as f:
				pickle.dump(t, f)
				pickle.dump(DupeShareHACK, f)
				pickle.dump(workLog, f)
			break
		except:
			i += 1
			if i >= 0x10000:
				logger.error('Failed to save work\n' + traceback.format_exc())
				try:
					os.unlink(SAVE_STATE_FILENAME)
				except:
					logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
				# BUGFIX: without this break, a persistent failure kept
				# the retry loop (and error logging) spinning forever.
				break
556
def exit():
	"""Gracefully stop servers and loggers, save state, and terminate.

	The teardown order matters: servers first (no new shares), then share
	loggers, then the state snapshot.
	"""
	t = time()
	stopServers()
	stopLoggers()
	saveState(t)
	logging.getLogger('exit').info('Goodbye...')
	# Signal ourselves so remaining (non-daemon) machinery is taken down too
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)
565
def restart():
	"""Gracefully stop servers and loggers, save state, and re-exec this program."""
	t = time()
	stopServers()
	stopLoggers()
	saveState(t)
	logging.getLogger('restart').info('Restarting...')
	try:
		# Replace the current process image with a fresh instance
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
576
def restoreState():
	"""Load saved state (resume time, dupe shares, work log) if present.

	Understands several historical state-file formats.  Failures are logged
	and leave the in-memory state untouched.
	"""
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					ver = t[3]
					# TODO
				
				# Old format, from 2012-02-02 to 2012-02-03
				workLog = t[0]
				DupeShareHACK = t[1]
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
				
				# Skip the work log when the snapshot is older than 2 minutes.
				# NOTE(review): in the old dict format t is None here, so
				# t + 120 raises TypeError and restore aborts via the except
				# below — confirm this is the intended handling.
				if t + 120 >= time():
					workLog = pickle.load(f)
				else:
					logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
618
619
620 from jsonrpcserver import JSONRPCListener, JSONRPCServer
621 import interactivemode
622 from networkserver import NetworkListener
623 import threading
624 import sharelogging
625 import imp
626
if __name__ == "__main__":
	# --- Share logging setup (with backward-compatibility shims) ---
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		# Legacy DbOptions maps to a postgres SQL share logger
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'type': 'sql',
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			# Legacy (name, parameters) tuple form; convert to a dict config
			name, parameters = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				name = 'sql'
				i = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			else:
				i = parameters
			i['type'] = name
		
		name = i['type']
		parameters = i
		try:
			# Dynamically load sharelogging/<name>.py and instantiate the
			# class of the same name with the config dict as kwargs
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name,  sys.exc_info())

	# --- Bitcoin p2p node listeners and upstream link ---
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	
	# Importing these modules registers their JSON-RPC method handlers
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	# --- Miner-facing JSON-RPC server setup ---
	server = JSONRPCServer()
	server.tls = threading.local()
	server.tls.wantClear = False
	if hasattr(config, 'JSONRPCAddress'):
		# Legacy single-address config folds into the addresses list
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	
	# Start the merkle maker (work generation) thread
	MM.start()
	
	restoreState()
	
	# Background daemon thread pruning stale work-log entries
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	# The p2p bitcoin node runs on its own daemon thread
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	# The main thread runs the miner-facing JSON-RPC server
	server.serve_forever()