Store yet more details (failure time and exceptions) on upstream submission failures
[bitcoin:eloipool.git] / eloipool.py
1 #!/usr/bin/python3
2 # Eloipool - Python Bitcoin pool server
3 # Copyright (C) 2011-2012  Luke Dashjr <luke-jr+eloipool@utopios.org>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as
7 # published by the Free Software Foundation, either version 3 of the
8 # License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
17
18 import config
19
# Fill in defaults for optional config settings.
if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

if not hasattr(config, 'ShareTarget'):
	# Default minimum share target: pdiff 1 (32 leading zero bits)
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
25
26
27 import logging
28
# Only configure logging if the embedding application hasn't already done so.
if len(logging.root.handlers) == 0:
	logging.basicConfig(
		format='%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s',
		level=logging.DEBUG,
	)
	# These loggers are too chatty at DEBUG; restrict them to INFO and above.
	for infoOnly in ('checkShare', 'JSONRPCHandler', 'merkleMaker', 'Waker for JSONRPCServer', 'JSONRPCServer'):
		logging.getLogger(infoOnly).setLevel(logging.INFO)
36
def RaiseRedFlags(reason):
	"""Log *reason* at CRITICAL severity on the 'redflag' logger.
	
	Returns *reason* unchanged so callers can raise or propagate it inline.
	"""
	flaglog = logging.getLogger('redflag')
	flaglog.critical(reason)
	return reason
40
41
from bitcoin.node import BitcoinLink, BitcoinNode
# P2P node used to announce solved blocks directly to the Bitcoin network.
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'

import jsonrpc
# JSON-RPC proxy to the upstream bitcoind (work source and block submission).
UpstreamBitcoindJSONRPC = jsonrpc.ServiceProxy(config.UpstreamURI)
48
49
50 from bitcoin.script import BitcoinScript
51 from bitcoin.txn import Txn
52 from base58 import b58decode
53 from struct import pack
54 import subprocess
55 from time import time
56
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
	"""Build the coinbase transaction distributing *coinbaseValue* base units.
	
	If config.CoinbaserCmd is set (and useCoinbaser is true), the command is run
	with '%d' replaced by the total value; its stdout must give the number of
	extra outputs, then alternating amount and address lines for each output.
	Whatever value remains is paid to config.TrackerAddr.
	"""
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			# NOTE: shell=True with an operator-supplied command; config is trusted input here
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except:
			# Force the failure branch below by pretending the coinbaser overspent
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			# Coinbaser output invalid or overspent: drop its outputs entirely
			# and pay the full value to the tracker address instead.
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO
	# TODO: red flag on dupe coinbase
	return txn
87
88
import jsonrpc_getwork
from util import Bits2Target

# Shared mutable module state:
workLog = {}  # username -> {work-identifier: (work-data, issue-time)}; see RegisterWork
userStatus = {}  # username -> [target, lastUpdate, workCount] for DynamicTargetting
networkTarget = None  # current network difficulty target as an int (set in blockChanged)
DupeShareHACK = {}  # header bytes of shares already seen this block (duplicate rejection)

server = None  # JSONRPCServer instance; assigned in the __main__ block
def updateBlocks():
	"""Wake the JSON-RPC server's longpoll connections so miners fetch fresh work."""
	server.wakeLongpoll()
100
def blockChanged():
	"""React to a new best block: reset duplicate-share tracking, recompute the
	network target from the new block's nBits, flush all outstanding work, and
	wake longpollers."""
	global DupeShareHACK
	DupeShareHACK = {}
	jsonrpc_getwork._CheckForDupesHACK = {}
	global MM, networkTarget, server
	bits = MM.currentBlock[2]
	if bits is None:
		networkTarget = None
	else:
		networkTarget = Bits2Target(bits)
	workLog.clear()
	updateBlocks()
113
114
115 from time import sleep, time
116 import traceback
117
118 def _WorkLogPruner_I(wl):
119         now = time()
120         pruned = 0
121         for username in wl:
122                 userwork = wl[username]
123                 for wli in tuple(userwork.keys()):
124                         if now > userwork[wli][1] + 120:
125                                 del userwork[wli]
126                                 pruned += 1
127         WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))
128
129 def WorkLogPruner(wl):
130         while True:
131                 try:
132                         sleep(60)
133                         _WorkLogPruner_I(wl)
134                 except:
135                         WorkLogPruner.logger.error(traceback.format_exc())
136 WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
137
138
from merklemaker import merkleMaker
# The merkleMaker generates work (merkle trees / block templates); it reads its
# settings straight out of the config module's attributes.
MM = merkleMaker()
MM.__dict__.update(config.__dict__)
MM.clearCoinbaseTxn = makeCoinbaseTxn(5000000000, False)  # FIXME
MM.clearCoinbaseTxn.assemble()
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
147
148
149 from binascii import b2a_hex
150 from copy import deepcopy
151 from math import log
152 from merklemaker import MakeBlockHeader
153 from struct import pack, unpack
154 import threading
155 from time import time
156 from util import RejectedShare, dblsha, hash2int, swap32, target2pdiff
157 import jsonrpc
158 import traceback
159
# Optional 'gotwork' notification service (JSON-RPC endpoint for near-block shares).
gotwork = None
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
else:
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	# DynamicTargetGoal is configured per minute; scale it to the window length.
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
170
def submitGotwork(info):
	"""Best-effort push of *info* to the configured gotwork service; any failure
	is logged and otherwise ignored (runs on a throwaway daemon thread)."""
	try:
		gotwork.gotwork(info)
	except:
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
176
def getTarget(username, now):
	"""Return the dynamic per-user share target, or None for the pool default.
	
	With DynamicTargetting enabled, scales each user's target so they find
	roughly config.DynamicTargetGoal shares per config.DynamicTargetWindow
	seconds, clamped between the network target and config.ShareTarget.
	Side effect: resets userStatus[username] to [target, now, 0].
	"""
	if not config.DynamicTargetting:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# First time we see this user: start at the minimum (default) target
		userStatus[username] = [None, now, 0]
		return None
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			# Still within the current window and the target remains valid: keep it
			return targetIn
		if not work:
			# Window expired with no shares at all: fall back to minimum target
			if targetIn:
				# BUGFIX: the format string previously had no % argument applied,
				# so the literal "%s" was logged instead of the username.
				getTarget.logger.debug("No shares from '%s', resetting to minimum target" % (username,))
				userStatus[username] = [None, now, 0]
			return None
	
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	# Scale target by (goal rate) / (observed rate) over the elapsed interval
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	if target >= config.ShareTarget:
		target = None
	else:
		if target < networkTarget:
			# Never require shares harder than actual blocks
			target = networkTarget
		if config.DynamicTargetting == 2:
			# Round target to a power of two :)
			target = 2**int(log(target, 2) + 1) - 1
		if target == config.ShareTarget:
			target = None
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s   to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
217
def RegisterWork(username, wli, wld):
	"""Record issued work *wld* under identifier *wli* for *username*.
	
	The user's dynamic target is appended to the stored work data.  Returns
	the share target actually in effect (pool default if no dynamic target).
	"""
	issued = time()
	usertarget = getTarget(username, issued)
	entry = tuple(wld) + (usertarget,)
	workLog.setdefault(username, {})[wli] = (entry, issued)
	if usertarget:
		return usertarget
	return config.ShareTarget
224
def getBlockHeader(username):
	"""Issue getwork-style work to *username*.
	
	Returns (block header bytes, the workLog entry just recorded, share target).
	"""
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	# FIX: removed a direct workLog write here that RegisterWork immediately
	# overwrote — it was dead (and briefly stored the un-tupled MRD).
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
232
def getBlockTemplate(username):
	"""Issue getblocktemplate-style work to *username*.
	
	The work identifier is extracted from inside the coinbase scriptsig
	(it follows the height push and a length byte).  Returns (work data,
	the workLog entry just recorded, share target).
	"""
	workunit = MM.getMC()
	cb = workunit[2]
	idpos = cb[0] + 2
	idlen = cb[idpos - 1]
	jobid = cb[idpos:idpos + idlen]
	tgt = RegisterWork(username, jobid, workunit)
	return (workunit, workLog[username][jobid], tgt)
241
loggersShare = []  # callables invoked with every processed share (see receiveShare)

# Forensics/debugging records kept for solved blocks:
RBDs = []  # deep copies of (data, txlist, blkdata, merkle tree, share, wld) per submission
RBPs = []  # raw serialized block payloads submitted upstream

from bitcoin.varlen import varlenEncode, varlenDecode
import bitcoin.txn
from merklemaker import assembleBlock

RBFs = []  # failure records: (details incl. time/exceptions, payload, blkhash, share)
def blockSubmissionThread(payload, blkhash, share):
	"""Submit a solved block upstream, retrying until accepted or stale.
	
	Tries BIP 22 submitblock first; on error falls back to the pre-BIP22
	getmemorypool submission form.  On total failure or upstream rejection,
	the details (including failure time and the exceptions seen) are stored
	in RBFs for later inspection.
	"""
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	nexterr = 0
	gmperr = None
	while True:
		try:
			# BIP 22 submission
			rv = UpstreamBitcoindJSONRPC.submitblock(payload)
			break
		except BaseException as gbterr:
			try:
				# Fallback: getmemorypool with a data argument submits a block
				# on pre-BIP22 servers; it returns True/False instead of a
				# null/reject-reason, so normalize that here.
				rv = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				if rv is True:
					rv = None
				elif rv is False:
					rv = 'rejected'
				break
			except BaseException as e2:
				gmperr = e2
			now = time()
			if now > nexterr:
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(traceback.format_exc())
				nexterr = now + 5
			if MM.currentBlock[0] not in myblock:
				# The chain has moved past this block: record the failure and stop
				RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
				RaiseRedFlags('Giving up on submitting block upstream')
				return
	if rv:
		# FIXME: The returned value could be a list of multiple responses
		# BUGFIX: 'now' was only assigned inside the retry loop's error path, so
		# a rejection on a successful first attempt raised NameError here;
		# capture the rejection time fresh instead.
		RBFs.append( (('upstream reject', rv, time()), payload, blkhash, share) )
		RaiseRedFlags('Upstream block submission failed: %s' % (rv,))
284
def checkShare(share):
	"""Validate a submitted share; raises RejectedShare on any problem.
	
	Checks (in order): prevblock freshness, known user, nBits, block version,
	known work, duplicates, minimal PoW, then — if it solves a real block —
	submits it upstream before finishing per-share checks (share target,
	timestamps, and for getblocktemplate shares the coinbase/txn contents).
	Mutates *share* in place ('time', mode key, 'target', 'upstreamResult'...).
	"""
	shareTime = share['time'] = time()
	
	data = share['data']
	data = data[:80]
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	# TODO: use userid
	username = share['username']
	if username not in workLog:
		raise RejectedShare('unknown-user')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
	
	# Identify which issued work this share is for: getblocktemplate shares
	# carry the identifier inside their coinbase; getwork shares are keyed by
	# merkle root.
	shareMerkleRoot = data[36:68]
	if 'blkdata' in share:
		pl = share['blkdata']
		(txncount, pl) = varlenDecode(pl)
		cbtxn = bitcoin.txn.Txn(pl)
		othertxndata = cbtxn.disassemble(retExtra=True)
		coinbase = cbtxn.getCoinbase()
		wliPos = coinbase[0] + 2
		wliLen = coinbase[wliPos - 1]
		wli = coinbase[wliPos:wliPos+wliLen]
		mode = 'MC'
		moden = 1
	else:
		wli = shareMerkleRoot
		mode = 'MRD'
		moden = 0
	
	MWL = workLog[username]
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	# Cheap proof-of-work sanity check: last 4 hash bytes must be zero
	blkhash = dblsha(data)
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = hash2int(blkhash)
	
	global networkTarget
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
	
	workMerkleTree = wld[1]
	workCoinbase = wld[2]
	workTarget = wld[6] if len(wld) > 6 else None
	
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(workCoinbase)
	cbtxn.assemble()
	
	# Real block solved: submit upstream (JSON-RPC thread + direct P2P) before
	# applying the remaining per-share rejections.
	if blkhashn <= networkTarget:
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data'] + share['blkdata']
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		RBPs.append(payload)
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		bcnode.submitBlock(payload)
		share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	
	# Gotwork hack...
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	
	# Credit this share toward the user's dynamic-target work counter,
	# weighted when the stored target differs from the work's target.
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			userStatus[username][2] += float(target) / workTarget
	
	# getblocktemplate-specific checks on the miner-supplied coinbase/txns
	if moden:
		cbpre = cbtxn.getCoinbase()
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
		
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
		
		cbtxn.setCoinbase(coinbase)
		cbtxn.assemble()
		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')
		
		if len(othertxndata):
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
440
def receiveShare(share):
	"""Entry point for a miner-submitted share: validate, then log.
	
	RejectedShare is re-raised to the caller after recording the reason in
	share['rejectReason']; every share (accepted or rejected) is passed to
	each registered share logger via the finally clause.
	"""
	# TODO: username => userid
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	finally:
		# Record the solution in logger-friendly hex form
		if '_origdata' in share:
			share['solution'] = share['_origdata']
		else:
			share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
		for i in loggersShare:
			i(share)
455
def newBlockNotification():
	"""Refresh the merkle tree in response to an external new-block notification."""
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?
461
def newBlockNotificationSIGNAL(signum, frame):
	"""SIGUSR1 handler: run newBlockNotification on a fresh daemon thread."""
	# Use a new thread, in case the signal handler is called with locks held
	thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
	thr.daemon = True
	thr.start()

from signal import signal, SIGUSR1
signal(SIGUSR1, newBlockNotificationSIGNAL)
470
471
import os
import os.path
import pickle
import signal
import sys
from time import sleep
import traceback

# File used to persist resume state across restarts (see saveState/restoreState)
SAVE_STATE_FILENAME = 'eloipool.worklog'
481
def stopServers():
	"""Shut down the bitcoin node and JSON-RPC server (idempotent).
	
	Flags both servers to stop, wakes them, waits up to 0x100 * 10ms for their
	loops to exit, then force-closes any file descriptors they still hold.
	"""
	logger = logging.getLogger('stopServers')
	
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server)
	for s in servers:
		s.keepgoing = False
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	# Poll until both server loops report stopped, bounded at 0x100 iterations
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)
517
def saveState(t = None):
	"""Persist resume state to SAVE_STATE_FILENAME.
	
	Pickles, in order: shutdown time *t*, DupeShareHACK, and workLog.
	Retries on failure up to 0x10000 times; on giving up, removes any partial
	state file so a later restoreState doesn't load garbage.
	"""
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	i = 0
	while True:
		try:
			with open(SAVE_STATE_FILENAME, 'wb') as f:
				pickle.dump(t, f)
				pickle.dump(DupeShareHACK, f)
				pickle.dump(workLog, f)
			break
		except:
			i += 1
			if i >= 0x10000:
				logger.error('Failed to save work\n' + traceback.format_exc())
				try:
					os.unlink(SAVE_STATE_FILENAME)
				except:
					logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
				# BUGFIX: previously there was no break here, so after exhausting
				# retries the loop kept failing (and unlinking) forever.
				break
539
def exit():
	"""Clean shutdown: stop servers, save work state, then terminate.
	
	NOTE: intentionally shadows the builtin exit(); termination is via
	SIGTERM to our own pid, with sys.exit(0) as a fallback.
	"""
	t = time()
	stopServers()
	saveState(t)
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)
547
def restart():
	"""Stop servers, save work state, and re-exec this process in place.
	
	NOTE(review): os.execv requires sys.argv[0] to be an executable path;
	on failure the error is logged and the (now stopped) process lingers.
	"""
	t = time()
	stopServers()
	saveState(t)
	logging.getLogger('restart').info('Restarting...')
	try:
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
557
def restoreState():
	"""Restore workLog and DupeShareHACK from SAVE_STATE_FILENAME, if present.
	
	Understands three on-disk formats: the 2012-02-02 tuple format, the
	2012-02-03 dict-only format (no timestamp), and the current format of
	three sequential pickles (timestamp, DupeShareHACK, workLog).  The
	workLog is only restored if saved less than 120 seconds ago.
	"""
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					# NOTE(review): t[3] would IndexError on a 3-tuple; left as-is pending the TODO
					ver = t[3]
					# TODO
				
				# Old format, from 2012-02-02 to 2012-02-03
				workLog = t[0]
				DupeShareHACK = t[1]
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
				
				# BUGFIX: for the old dict-only format t is None here, and
				# 't + 120' raised TypeError, causing a spurious "Failed to
				# restore state" even though DupeShareHACK was loaded fine.
				if t is not None and t + 120 >= time():
					workLog = pickle.load(f)
				else:
					logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
599
600
601 from jsonrpcserver import JSONRPCListener, JSONRPCServer
602 import interactivemode
603 from networkserver import NetworkListener
604 import threading
605 import sharelogging
606 import imp
607
if __name__ == "__main__":
	# --- Share logging setup (with backward compatibility shims) ---
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		# Legacy DbOptions config: translate into an sql/postgres ShareLogging entry
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'type': 'sql',
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			# Legacy (name, parameters) tuple form: convert to a dict entry
			name, parameters = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				name = 'sql'
				i = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			else:
				i = parameters
			i['type'] = name
		
		# Dynamically load sharelogging.<type> and register its logShare hook
		# NOTE(review): the imp module is deprecated in modern Python; fine for the era of this code
		name = i['type']
		parameters = i
		try:
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo.logShare)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name,  sys.exc_info())

	# --- Bitcoin P2P listeners and upstream node link ---
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	
	# Imported for their side effects: each registers its RPC method handlers
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	# --- JSON-RPC (miner-facing) server setup ---
	server = JSONRPCServer()
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	
	# --- Start everything: work generation, state restore, helper threads ---
	MM.start()
	
	restoreState()
	
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	# Main thread runs the miner-facing JSON-RPC server until shutdown
	server.serve_forever()