Store information on upstream block submission failures in new RBFs array
[bitcoin:eloipool.git] / eloipool.py
1 #!/usr/bin/python3
2 # Eloipool - Python Bitcoin pool server
3 # Copyright (C) 2011-2012  Luke Dashjr <luke-jr+eloipool@utopios.org>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as
7 # published by the Free Software Foundation, either version 3 of the
8 # License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
17
import config

# Fill in defaults for optional configuration values so the rest of the
# code can assume they exist.
if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

# ShareTarget is the easiest (largest) target a share may meet; this default
# is the conventional pool difficulty-1 target.
if not hasattr(config, 'ShareTarget'):
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
25
26
import logging

# Only configure logging if an embedding application hasn't already done so.
if len(logging.root.handlers) == 0:
	logging.basicConfig(
		format='%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s',
		level=logging.DEBUG,
	)
	# These loggers are too chatty at DEBUG; restrict them to INFO and above.
	for infoOnly in ('checkShare', 'JSONRPCHandler', 'merkleMaker', 'Waker for JSONRPCServer', 'JSONRPCServer'):
		logging.getLogger(infoOnly).setLevel(logging.INFO)
36
def RaiseRedFlags(reason):
	"""Log *reason* as a critical 'redflag' event and hand it back.

	Returning the message lets callers embed the call in an expression,
	e.g. ``raise SomeError(RaiseRedFlags(msg))``.
	"""
	redflagLogger = logging.getLogger('redflag')
	redflagLogger.critical(reason)
	return reason
40
41
from bitcoin.node import BitcoinLink, BitcoinNode
# Peer-to-peer bitcoin node, used to announce solved blocks directly on the
# network in addition to the JSON-RPC submission below.
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'

import jsonrpc
# JSON-RPC proxy to the upstream bitcoind (submitblock/getmemorypool).
UpstreamBitcoindJSONRPC = jsonrpc.ServiceProxy(config.UpstreamURI)
48
49
50 from bitcoin.script import BitcoinScript
51 from bitcoin.txn import Txn
52 from base58 import b58decode
53 from struct import pack
54 import subprocess
55 from time import time
56
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
	# Build the coinbase transaction paying out coinbaseValue (satoshis).
	# If useCoinbaser and config.CoinbaserCmd are set, an external command
	# decides how to split the reward; the remainder (or the whole reward
	# when the coinbaser fails) goes to config.TrackerAddr.
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			# '%d' in the configured command is replaced with the reward.
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			# Expected output: first line is the output count, then for each
			# output one line with the amount and one line with the address.
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except:
			# Deliberate bare except: any coinbaser problem must not kill
			# work generation. Overshooting coinbaseValue forces the
			# failure branch below.
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			# Coinbaser overspent or crashed: discard its outputs and pay
			# the full reward to the tracker address instead.
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO
	# TODO: red flag on dupe coinbase
	return txn
87
88
import jsonrpc_getwork
from util import Bits2Target

# Issued-work log: username -> { work-id -> (work-data, issue-time) }
workLog = {}
# Dynamic-target state: username -> [target, lastUpdate, workCount]
userStatus = {}
# Current network (block) target; None until the first template arrives.
networkTarget = None
# Header data of shares seen this block, for duplicate rejection.
DupeShareHACK = {}
96
server = None  # JSONRPCServer instance; assigned in the __main__ block below
def updateBlocks():
	# Wake all longpolling clients so they fetch updated work.
	server.wakeLongpoll()
100
def blockChanged():
	"""React to a new best block: all outstanding work is now stale.

	Resets duplicate-share tracking, recomputes the network target from the
	new block's nBits, clears the work log, and wakes longpoll clients.
	"""
	global MM, networkTarget, server, DupeShareHACK
	DupeShareHACK = {}
	jsonrpc_getwork._CheckForDupesHACK = {}
	bits = MM.currentBlock[2]
	networkTarget = None if bits is None else Bits2Target(bits)
	workLog.clear()
	updateBlocks()
113
114
115 from time import sleep, time
116 import traceback
117
def _WorkLogPruner_I(wl):
	"""Single pruning pass: drop work entries issued more than 120s ago."""
	cutoff = time() - 120
	pruned = 0
	for userwork in wl.values():
		stale = [wli for wli, entry in userwork.items() if entry[1] < cutoff]
		for wli in stale:
			del userwork[wli]
			pruned += 1
	WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))

def WorkLogPruner(wl):
	"""Thread body: prune the work log once a minute, forever.

	Exceptions are logged and swallowed so the pruner thread never dies.
	"""
	while True:
		try:
			sleep(60)
			_WorkLogPruner_I(wl)
		except:
			WorkLogPruner.logger.error(traceback.format_exc())
WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
137
138
from merklemaker import merkleMaker
# The merkle maker maintains block templates from upstream and invokes the
# callbacks below on block changes/updates.
MM = merkleMaker()
MM.__dict__.update(config.__dict__)
MM.clearCoinbaseTxn = makeCoinbaseTxn(5000000000, False)  # FIXME
MM.clearCoinbaseTxn.assemble()
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
147
148
149 from binascii import b2a_hex
150 from copy import deepcopy
151 from math import log
152 from merklemaker import MakeBlockHeader
153 from struct import pack, unpack
154 import threading
155 from time import time
156 from util import RejectedShare, dblsha, hash2int, swap32, target2pdiff
157 import jsonrpc
158 import traceback
159
gotwork = None
if hasattr(config, 'GotWorkURI'):
	# Optional JSON-RPC service to which solved proofs-of-work are relayed.
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
else:
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	# Scale DynamicTargetGoal (presumably shares per minute — confirm) to
	# the configured window length; getTarget works in per-window units.
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
170
def submitGotwork(info):
	# Best-effort relay of a solved proof-of-work to the gotwork service;
	# failures are logged but never interrupt share processing.
	try:
		gotwork.gotwork(info)
	except:
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
176
def getTarget(username, now):
	"""Return the per-user share target, or None for the default target.

	Implements dynamic share targetting: adjusts each miner's target so it
	submits roughly config.DynamicTargetGoal shares per DynamicTargetWindow
	seconds. Updates userStatus[username] ([target, lastUpdate, work]) as a
	side effect.
	"""
	if not config.DynamicTargetting:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# First sight of this user: start at the default (minimum) target.
		userStatus[username] = [None, now, 0]
		return None
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		# Still within goal; keep the current target while inside the
		# measurement window (unless it is capped below the network target).
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			return targetIn
		if not work:
			# Window expired with no shares at all: reset to minimum target.
			if targetIn:
				# Fixed: the username argument was missing, so this logged
				# a literal '%s' instead of the user's name.
				getTarget.logger.debug("No shares from '%s', resetting to minimum target" % (username,))
				userStatus[username] = [None, now, 0]
			return None
	
	# Rescale the target by (goal rate) / (observed rate) over the elapsed time.
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	if target >= config.ShareTarget:
		# Never issue work easier than the configured share target.
		target = None
	else:
		if target < networkTarget:
			# Never issue work harder than the network target.
			target = networkTarget
		if config.DynamicTargetting == 2:
			# Round target to a power of two :)
			target = 2**int(log(target, 2) + 1) - 1
		if target == config.ShareTarget:
			target = None
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s   to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
217
def RegisterWork(username, wli, wld):
	"""Record issued work in the work log and return its effective target.

	The per-user dynamic target is appended to the stored work data; the
	return value is always a concrete target (config.ShareTarget when no
	dynamic target applies).
	"""
	issueTime = time()
	target = getTarget(username, issueTime)
	entry = (tuple(wld) + (target,), issueTime)
	userLog = workLog.setdefault(username, {})
	userLog[wli] = entry
	return target or config.ShareTarget
224
def getBlockHeader(username):
	"""Issue getwork-style work: returns (header, workLog entry, target).

	The merkle root serves as the work identifier.
	"""
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	# RegisterWork stores the work-log entry keyed by merkle root; the
	# direct workLog assignment that used to precede this call was redundant
	# (immediately overwritten by RegisterWork) and has been removed.
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
232
def getBlockTemplate(username):
	"""Issue getmemorypool/GBT-style work: returns (MC, workLog entry, target)."""
	MC = MM.getMC()
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	# The work identifier is embedded in the coinbase script: the byte at
	# offset coinbase[0]+1 holds its length, and the id follows immediately.
	idPos = coinbase[0] + 2
	idLen = coinbase[idPos - 1]
	wli = coinbase[idPos:idPos + idLen]
	target = RegisterWork(username, wli, MC)
	return (MC, workLog[username][wli], target)
241
# Registered share-logger callbacks; each is called with every share dict.
loggersShare = []

# Audit trails for real (network-target) blocks:
# RBDs: raw block data tuples for every block we tried to assemble.
RBDs = []
# RBPs: assembled block payloads as submitted upstream.
RBPs = []

from bitcoin.varlen import varlenEncode, varlenDecode
import bitcoin.txn
from merklemaker import assembleBlock

# RBFs: records of upstream block submission failures.
RBFs = []
def blockSubmissionThread(payload, blkhash, share):
	# Submit a solved block upstream, retrying until it is accepted, it is
	# explicitly rejected, or the network moves on to a different block.
	# Failures are recorded in the global RBFs list.
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	nexterr = 0
	while True:
		try:
			# Preferred: BIP 22 submitblock.
			rv = UpstreamBitcoindJSONRPC.submitblock(payload)
			break
		except:
			try:
				# Fallback: legacy getmemorypool(data) submission, which
				# returns True/False instead of None/reason.
				rv = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				if rv is True:
					rv = None
				elif rv is False:
					rv = 'rejected'
				break
			except:
				pass
			now = time()
			if now > nexterr:
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(traceback.format_exc())
				# Rate-limit red flags to one per 5 seconds while retrying.
				nexterr = now + 5
			if MM.currentBlock[0] not in myblock:
				# The network accepted a different block; our solution is
				# worthless now, so record the failure and give up.
				RBFs.append( (('next block', MM.currentBlock), payload, blkhash, share) )
				RaiseRedFlags('Giving up on submitting block upstream')
				return
	if rv:
		# FIXME: The returned value could be a list of multiple responses
		RBFs.append( (('upstream reject', rv), payload, blkhash, share) )
		RaiseRedFlags('Upstream block submission failed: %s' % (rv,))
283
def checkShare(share):
	# Validate a submitted share, raising RejectedShare on any problem.
	# If the share solves a block, it is submitted upstream (and via the
	# P2P node) BEFORE any further share-level rejection can occur. The
	# share dict is annotated in place for the share loggers.
	shareTime = share['time'] = time()
	
	data = share['data']
	data = data[:80]  # block header only
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	# TODO: use userid
	username = share['username']
	if username not in workLog:
		raise RejectedShare('unknown-user')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
	
	shareMerkleRoot = data[36:68]
	if 'blkdata' in share:
		# GBT/getmemorypool-style share: transaction data follows the
		# header; recover the work identifier from the coinbase script
		# (same layout as in getBlockTemplate).
		pl = share['blkdata']
		(txncount, pl) = varlenDecode(pl)
		cbtxn = bitcoin.txn.Txn(pl)
		othertxndata = cbtxn.disassemble(retExtra=True)
		coinbase = cbtxn.getCoinbase()
		wliPos = coinbase[0] + 2
		wliLen = coinbase[wliPos - 1]
		wli = coinbase[wliPos:wliPos+wliLen]
		mode = 'MC'
		moden = 1
	else:
		# getwork-style share: the merkle root itself identifies the work.
		wli = shareMerkleRoot
		mode = 'MRD'
		moden = 0
	
	MWL = workLog[username]
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	blkhash = dblsha(data)
	# Reject unless the last four bytes of the hash are zero ('H-not-zero').
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = hash2int(blkhash)
	
	global networkTarget
	# Block solutions are logged at INFO; ordinary shares at DEBUG.
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
	
	workMerkleTree = wld[1]
	workCoinbase = wld[2]
	# The per-user dynamic target was appended by RegisterWork (index 6).
	workTarget = wld[6] if len(wld) > 6 else None
	
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(workCoinbase)
	cbtxn.assemble()
	
	if blkhashn <= networkTarget:
		# This share solves a block: assemble and submit it upstream.
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data'] + share['blkdata']
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		RBPs.append(payload)
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		# Also announce the block directly on the P2P network.
		bcnode.submitBlock(payload)
		share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	
	# Gotwork hack...
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			# Build the coinbase + merkle-branch blob the gotwork service expects.
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			# Credit work proportionally when this share was done against a
			# different target than the user's current one.
			userStatus[username][2] += float(target) / workTarget
	
	if moden:
		# Full verification of GBT-style shares: the submitted coinbase
		# must extend the one we issued, and the merkle root must match.
		cbpre = cbtxn.getCoinbase()
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
		
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
		
		cbtxn.setCoinbase(coinbase)
		cbtxn.assemble()
		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')
		
		if len(othertxndata):
			# Any non-coinbase transaction data must match exactly what we
			# would assemble ourselves.
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
439
def receiveShare(share):
	"""Process one incoming share: validate it, then log it unconditionally.

	RejectedShare exceptions propagate to the caller after the reason is
	recorded in share['rejectReason']; the finally-block guarantees every
	share (accepted or not) reaches all registered share loggers.
	"""
	# TODO: username => userid
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	finally:
		if '_origdata' in share:
			solution = share['_origdata']
		else:
			solution = b2a_hex(swap32(share['data'])).decode('utf8')
		share['solution'] = solution
		for logShare in loggersShare:
			logShare(share)
454
def newBlockNotification():
	# Called (via SIGUSR1, see below) when an external process learns of a
	# new block: refresh the merkle tree immediately rather than waiting
	# for the next poll.
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?
	pass
460
def newBlockNotificationSIGNAL(signum, frame):
	"""Signal-handler wrapper around newBlockNotification."""
	# Use a new thread, in case the signal handler is called with locks held
	notifier = threading.Thread(
		target=newBlockNotification,
		name='newBlockNotification via signal %s' % (signum,),
	)
	notifier.daemon = True
	notifier.start()
466
from signal import signal, SIGUSR1
# SIGUSR1 triggers an immediate block-template refresh.
signal(SIGUSR1, newBlockNotificationSIGNAL)
469
470
471 import os
472 import os.path
473 import pickle
474 import signal
475 import sys
476 from time import sleep
477 import traceback
478
# File used to persist work log and dupe-share data across restarts.
SAVE_STATE_FILENAME = 'eloipool.worklog'
480
def stopServers():
	# Gracefully stop the P2P node and JSON-RPC server: ask each loop to
	# stop, wake it, wait briefly for it to exit, then close its file
	# descriptors. Safe to call more than once (subsequent calls no-op).
	logger = logging.getLogger('stopServers')
	
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server)
	for s in servers:
		s.keepgoing = False
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		# Give up after 0x100 polls of 10ms (~2.5 seconds).
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)
516
def saveState(t = None):
	"""Persist resume data (timestamp, DupeShareHACK, workLog) to disk.

	Retries on failure; after 0x10000 consecutive failures it logs the
	error, removes any partial state file, and gives up.
	"""
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	i = 0
	while True:
		try:
			with open(SAVE_STATE_FILENAME, 'wb') as f:
				pickle.dump(t, f)
				pickle.dump(DupeShareHACK, f)
				pickle.dump(workLog, f)
			break
		except:
			i += 1
			if i >= 0x10000:
				logger.error('Failed to save work\n' + traceback.format_exc())
				try:
					os.unlink(SAVE_STATE_FILENAME)
				except:
					logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
				# Fixed: without this break, a persistent failure spun in an
				# infinite busy loop, re-logging the error on every pass.
				break
538
def exit():
	# Orderly shutdown: capture the time for downtime accounting, stop the
	# servers, persist state, then terminate the process.
	# NOTE: shadows the builtin exit(); kept for backward compatibility.
	t = time()
	stopServers()
	saveState(t)
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)
546
def restart():
	# Persist state, then replace the current process with a fresh copy of
	# this program; restoreState() picks the state back up on startup.
	t = time()
	stopServers()
	saveState(t)
	logging.getLogger('restart').info('Restarting...')
	try:
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
556
def restoreState():
	# Load state saved by saveState(), handling several historical on-disk
	# formats. DupeShareHACK is always restored; workLog only when the save
	# is less than 120 seconds old.
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					# NOTE(review): t[3] raises IndexError when len(t) == 3;
					# caught by the except below, but confirm before any
					# future format actually uses this path.
					ver = t[3]
					# TODO
				
				# Old format, from 2012-02-02 to 2012-02-03
				workLog = t[0]
				DupeShareHACK = t[1]
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
				
				# NOTE(review): in the dict branch t is None here, so
				# t + 120 raises TypeError and restore is reported as
				# failed (though DupeShareHACK was already set) — confirm
				# this is intended for that legacy format.
				if t + 120 >= time():
					workLog = pickle.load(f)
				else:
					logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
598
599
600 from jsonrpcserver import JSONRPCListener, JSONRPCServer
601 import interactivemode
602 from networkserver import NetworkListener
603 import threading
604 import sharelogging
605 import imp
606
if __name__ == "__main__":
	# --- Share logging configuration (with backward compatibility) ---
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'type': 'sql',
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			# Old-style (name, parameters) tuple: convert to dict form.
			name, parameters = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				name = 'sql'
				i = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			else:
				i = parameters
			i['type'] = name
		
		# Dynamically load the share-logger module named by 'type' from the
		# sharelogging package and register its logShare callback.
		name = i['type']
		parameters = i
		try:
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo.logShare)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name,  sys.exc_info())

	# --- Bitcoin P2P node listeners and optional upstream link ---
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	
	# Imported for side effects — presumably these register their RPC
	# handlers on import (confirm against the modules themselves).
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	# --- Miner-facing JSON-RPC server setup ---
	server = JSONRPCServer()
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	
	# --- Start everything ---
	MM.start()
	
	restoreState()
	
	# Background thread pruning expired entries from the work log.
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	# Background thread running the P2P node; the main thread runs the
	# JSON-RPC server loop.
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	server.serve_forever()