# Merge branch 'dyntarget'
# [bitcoin:eloipool.git] / eloipool.py
1 #!/usr/bin/python3
2 # Eloipool - Python Bitcoin pool server
3 # Copyright (C) 2011-2012  Luke Dashjr <luke-jr+eloipool@utopios.org>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as
7 # published by the Free Software Foundation, either version 3 of the
8 # License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
17
import config

# Backfill optional configuration values with their defaults.
config.ServerName = getattr(config, 'ServerName', 'Unnamed Eloipool')
config.ShareTarget = getattr(config, 'ShareTarget', 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff)
26
import logging

# Only install a default logging configuration if nothing else has done so yet.
if not logging.root.handlers:
	logging.basicConfig(
		format='%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s',
		level=logging.DEBUG,
	)
	# These loggers are too chatty at DEBUG; cap them at INFO by default.
	_quietLoggers = ('checkShare', 'JSONRPCHandler', 'merkleMaker', 'Waker for JSONRPCServer', 'JSONRPCServer')
	for _quietName in _quietLoggers:
		logging.getLogger(_quietName).setLevel(logging.INFO)
36
def RaiseRedFlags(reason):
	"""Log *reason* at CRITICAL on the 'redflag' logger.

	Returns the reason unchanged so callers can log-and-raise in one expression.
	"""
	logger = logging.getLogger('redflag')
	logger.critical(reason)
	return reason
40
41
from bitcoin.node import BitcoinLink, BitcoinNode
# P2P node handle on the Bitcoin network; also used to submit solved blocks
# directly (see checkShare's bcnode.submitBlock call).
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'

import jsonrpc
# JSON-RPC proxy to the upstream bitcoind (used for submitblock/getmemorypool).
UpstreamBitcoindJSONRPC = jsonrpc.ServiceProxy(config.UpstreamURI)
48
49
50 from bitcoin.script import BitcoinScript
51 from bitcoin.txn import Txn
52 from base58 import b58decode
53 from struct import pack
54 import subprocess
55 from time import time
56
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
	"""Build the coinbase transaction paying out coinbaseValue (base units).

	If config.CoinbaserCmd is set (and useCoinbaser is true), run it to get
	extra payout outputs; whatever value remains goes to config.TrackerAddr.
	If the coinbaser fails or tries to spend the entire value (or more), all
	of its outputs are discarded and the full value goes to the tracker.
	"""
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			# The command has '%d' replaced with the total value, and must
			# print: an output count, then for each output an amount line
			# followed by an address line.
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except:
			# Sentinel: force the failure branch below on any error.
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			# Coinbaser failed or allocated too much; drop all its outputs.
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	# The remainder (or the full value) goes to the pool's tracker address.
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO
	# TODO: red flag on dupe coinbase
	return txn
87
88
import jsonrpc_getwork
from util import Bits2Target

# Shared mutable state (reset per block by blockChanged):
workLog = {}         # username -> { work-log id -> (work data tuple, issue time) }
userStatus = {}      # username -> [target or None, last retarget time, work credit]
networkTarget = None # integer target for a real block solve (from the nBits field)
DupeShareHACK = {}   # header bytes of every share seen this block (duplicate filter)
96
server = None  # JSONRPCServer instance; assigned during __main__ startup
def updateBlocks():
	# Wake the JSON-RPC server's longpoll machinery so miners fetch fresh work.
	server.wakeLongpoll()
100
def blockChanged():
	"""Reset per-block state after the best block changes, then wake longpolls."""
	global MM, networkTarget, server
	global DupeShareHACK
	# A new block invalidates all duplicate-share tracking.
	DupeShareHACK = {}
	jsonrpc_getwork._CheckForDupesHACK = {}
	bits = MM.currentBlock[2]
	networkTarget = Bits2Target(bits) if bits is not None else None
	workLog.clear()
	updateBlocks()
113
114
115 from time import sleep, time
116 import traceback
117
118 def _WorkLogPruner_I(wl):
119         now = time()
120         pruned = 0
121         for username in wl:
122                 userwork = wl[username]
123                 for wli in tuple(userwork.keys()):
124                         if now > userwork[wli][1] + 120:
125                                 del userwork[wli]
126                                 pruned += 1
127         WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))
128
129 def WorkLogPruner(wl):
130         while True:
131                 try:
132                         sleep(60)
133                         _WorkLogPruner_I(wl)
134                 except:
135                         WorkLogPruner.logger.error(traceback.format_exc())
136 WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
137
138
from merklemaker import merkleMaker
# The merkleMaker thread maintains the current block template / merkle roots.
MM = merkleMaker()
# Copy every config attribute onto the merkleMaker instance as its settings.
MM.__dict__.update(config.__dict__)
# Bare coinbase txn (no coinbaser outputs); 5000000000 base units = 50 BTC. FIXME
MM.clearCoinbaseTxn = makeCoinbaseTxn(5000000000, False)  # FIXME
MM.clearCoinbaseTxn.assemble()
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
147
148
149 from binascii import b2a_hex
150 from copy import deepcopy
151 from math import log
152 from merklemaker import MakeBlockHeader
153 from struct import pack, unpack
154 import threading
155 from time import time
156 from util import RejectedShare, dblsha, hash2int, swap32, target2pdiff
157 import jsonrpc
158 import traceback
159
gotwork = None
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if hasattr(config, 'DynamicTargetting'):
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	# Scale the goal from per-minute units to per-window units (window is in seconds).
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
else:
	config.DynamicTargetting = 0
170
def submitGotwork(info):
	# Forward a solved-block notification to the configured gotwork service.
	# Best-effort: failures are logged but never propagate to the share path.
	try:
		gotwork.gotwork(info)
	except:
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
176
def getTarget(username, now):
	"""Return the current per-user share target, or None for the pool default.

	Implements dynamic targetting: each user's target is rescaled so they
	find about config.DynamicTargetGoal shares per config.DynamicTargetWindow
	seconds. State lives in userStatus[username] as
	[target-or-None, last retarget time, work credit].
	"""
	if not config.DynamicTargetting:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# First sight of this user: start them on the default target.
		userStatus[username] = [None, now, 0]
		return None
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		# Goal not yet exceeded: keep the current target while the window is
		# open (unless it has fallen below the network target).
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			return targetIn
		if not work:
			# No shares at all in the window: reset to the default target.
			if targetIn:
				# BUGFIX: the format string was previously logged without its
				# username argument, printing a literal "%s".
				getTarget.logger.debug("No shares from '%s', resetting to minimum target" % (username,))
				userStatus[username] = [None, now, 0]
			return None
	
	# Rescale the target in proportion to observed share rate vs. the goal.
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	if target >= config.ShareTarget:
		# Never easier than the pool-wide default target.
		target = None
	else:
		if target < networkTarget:
			# Never harder than the network (real block) target.
			target = networkTarget
		if config.DynamicTargetting == 2:
			# Round target to a power of two :)
			target = 2**int(log(target, 2) + 1) - 1
		if target == config.ShareTarget:
			target = None
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s   to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
217
def RegisterWork(username, wli, wld):
	"""Record issued work under (username, wli) and return its share target."""
	now = time()
	target = getTarget(username, now)
	# The per-user target is appended as the final element of the work data.
	entry = tuple(wld) + (target,)
	userLog = workLog.setdefault(username, {})
	userLog[wli] = (entry, now)
	return target or config.ShareTarget
224
def getBlockHeader(username):
	"""Issue getwork-style (MRD) work for *username*.

	Returns (header bytes, work-log entry, share target).
	"""
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	# RegisterWork creates the workLog entry (with the target appended); the
	# previous direct pre-write of workLog[username][merkleRoot] here was a
	# dead store immediately overwritten by it, and has been removed.
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
232
def getBlockTemplate(username):
	"""Issue getmemorypool-style (MC) work, keyed by the coinbase work-log id."""
	MC = MM.getMC()
	coinbase = MC[2]
	# The work-log id is the second data push in the coinbase script: byte 0
	# is the first push's length, and the byte after that push is the id's length.
	idPos = coinbase[0] + 2
	idLen = coinbase[idPos - 1]
	wli = coinbase[idPos:idPos + idLen]
	target = RegisterWork(username, wli, MC)
	return (MC, workLog[username][wli], target)
241
# Registered share-logger callbacks; each gets called with every processed share.
loggersShare = []

# Debug history of real (network-solving) block data and submitted payloads.
RBDs = []
RBPs = []
246
247 from bitcoin.varlen import varlenEncode, varlenDecode
248 import bitcoin.txn
249 from merklemaker import assembleBlock
250
def blockSubmissionThread(payload, blkhash):
	# Identifiers for the block being submitted: its hash and prevBlock field.
	# Used below to recognise when the chain has moved past this block.
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	nexterr = 0
	while True:
		try:
			# Preferred: BIP 22 submitblock.
			rv = UpstreamBitcoindJSONRPC.submitblock(payload)
			break
		except:
			try:
				# Fallback: older getmemorypool(data) submission form; its
				# boolean result is translated to submitblock conventions.
				rv = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				if rv is True:
					rv = None
				elif rv is False:
					rv = 'rejected'
				break
			except:
				pass
			now = time()
			# Rate-limit red-flag logging to once every 5 seconds while retrying.
			if now > nexterr:
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(traceback.format_exc())
				nexterr = now + 5
			# Stop retrying once the current chain tip is neither this block
			# nor the block it builds on.
			if MM.currentBlock[0] not in myblock:
				RaiseRedFlags('Giving up on submitting block upstream')
				return
	if rv:
		# FIXME: The returned value could be a list of multiple responses
		RaiseRedFlags('Upstream block submission failed: %s' % (rv,))
280
def checkShare(share):
	"""Validate one submitted share.

	share: dict with at least 'data' (block header bytes) and 'username';
	GBT-style shares also carry 'blkdata' (serialized transactions).
	Raises RejectedShare with a short reason string on any failure; if the
	share actually solves the network target, also submits the block
	upstream. Mutates share in place ('time', 'target', 'MRD'/'MC', ...).
	"""
	shareTime = share['time'] = time()
	
	data = share['data']
	# Only the 80-byte block header matters for most checks.
	data = data[:80]
	(prevBlock, height, bits) = MM.currentBlock
	# Header layout: [0:4] version, [4:36] prev block hash, [36:68] merkle
	# root, [68:72] timestamp, [72:76] nBits, [76:80] nonce.
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		# Work built on the previous block is merely stale; anything else is bogus.
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	# TODO: use userid
	username = share['username']
	if username not in workLog:
		raise RejectedShare('unknown-user')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
	
	shareMerkleRoot = data[36:68]
	if 'blkdata' in share:
		# GBT-style share: recover the work-log id embedded as the second
		# data push of the coinbase script (see getBlockTemplate).
		pl = share['blkdata']
		(txncount, pl) = varlenDecode(pl)
		cbtxn = bitcoin.txn.Txn(pl)
		othertxndata = cbtxn.disassemble(retExtra=True)
		coinbase = cbtxn.getCoinbase()
		wliPos = coinbase[0] + 2
		wliLen = coinbase[wliPos - 1]
		wli = coinbase[wliPos:wliPos+wliLen]
		mode = 'MC'
		moden = 1
	else:
		# getwork-style share: the merkle root itself is the work-log id.
		wli = shareMerkleRoot
		mode = 'MRD'
		moden = 0
	
	MWL = workLog[username]
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	
	# Reject exact duplicates of any header seen since the last block change.
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	blkhash = dblsha(data)
	# Quick sanity check: the hash's trailing 4 bytes (getwork's "H" word)
	# must be zero even for the easiest acceptable share.
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = hash2int(blkhash)
	
	global networkTarget
	# Real block solves get logged at INFO; ordinary shares only at DEBUG.
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
	
	# Unpack the recorded work; wld[6] is the per-user target appended by
	# RegisterWork (None means the pool default).
	workMerkleTree = wld[1]
	workCoinbase = wld[2]
	workTarget = wld[6]
	
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(workCoinbase)
	cbtxn.assemble()
	
	if blkhashn <= networkTarget:
		# The share solves a real block: submit upstream both via JSON-RPC
		# (in a separate thread) and via the p2p node, and record it.
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data'] + share['blkdata']
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		RBPs.append(payload)
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash)).start()
		bcnode.submitBlock(payload)
		share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	
	# Gotwork hack...
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			# Build the coinbase txn plus merkle branch proof for gotwork.
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	# Timestamp sanity checks: header timestamp must be within [-5 min, +2 h]
	# of receipt time.
	# NOTE(review): the stale-work comparison below checks receipt time
	# against issue time *minus* 120s — confirm intent (pruning of old work
	# is otherwise handled by WorkLogPruner).
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		# Credit work, scaled when this share was for an outdated target.
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			userStatus[username][2] += float(target) / workTarget
	
	if moden:
		# GBT mode: verify the miner kept our coinbase prefix intact, added
		# no forbidden flags, and submitted exactly the transactions we issued.
		cbpre = cbtxn.getCoinbase()
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
		
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
		
		cbtxn.setCoinbase(coinbase)
		cbtxn.assemble()
		if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
			raise RejectedShare('bad-txnmrklroot')
		
		if len(othertxndata):
			allowed = assembleBlock(data, txlist)[80:]
			if allowed != share['blkdata']:
				raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
436
def receiveShare(share):
	# TODO: username => userid
	# Validate the share; on rejection, record the reason and re-raise so the
	# server can report it to the miner. Either way (accepted or rejected),
	# the finally block records the solution hex and feeds the share to every
	# registered share logger.
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	finally:
		if '_origdata' in share:
			share['solution'] = share['_origdata']
		else:
			share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
		for i in loggersShare:
			i(share)
451
def newBlockNotification():
	"""Handle an external new-block notification by refreshing the merkle tree."""
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?
457
def newBlockNotificationSIGNAL(signum, frame):
	# Use a new thread, in case the signal handler is called with locks held
	thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
	thr.daemon = True
	thr.start()

from signal import signal, SIGUSR1
signal(SIGUSR1, newBlockNotificationSIGNAL)
# NOTE: a later `import signal` rebinds the name 'signal' to the module, so
# the function imported here is only callable up to that point.
466
467
468 import os
469 import os.path
470 import pickle
471 import signal
472 import sys
473 from time import sleep
474 import traceback
475
# On-disk file used to persist workLog and dupe state across restarts.
SAVE_STATE_FILENAME = 'eloipool.worklog'
477
def stopServers():
	"""Ask both servers (p2p node and JSON-RPC) to shut down, wait briefly
	for them to finish, then force-close their file descriptors."""
	logger = logging.getLogger('stopServers')
	
	# Guard against being invoked twice (e.g. from both exit() and restart()).
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server)
	# Flag both loops to stop, then wake them so they notice.
	for s in servers:
		s.keepgoing = False
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		# Poll up to 0x100 times at 10ms each (~2.5s) before giving up.
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	# Force-close any file descriptors the servers still have registered.
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)
513
def saveState(t = None):
	"""Persist resume state (timestamp, DupeShareHACK, workLog) to disk.

	t is the shutdown timestamp; restoreState() uses it to decide whether the
	saved workLog is still fresh enough to restore.
	"""
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	i = 0
	while True:
		try:
			with open(SAVE_STATE_FILENAME, 'wb') as f:
				pickle.dump(t, f)
				pickle.dump(DupeShareHACK, f)
				pickle.dump(workLog, f)
			break
		except:
			i += 1
			if i >= 0x10000:
				logger.error('Failed to save work\n' + traceback.format_exc())
				try:
					os.unlink(SAVE_STATE_FILENAME)
				except:
					logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
				# BUGFIX: give up after the retry limit; previously there was
				# no break here, so a persistent failure looped forever.
				break
535
def exit():
	"""Shut down cleanly: stop servers, save state, then terminate the process."""
	began = time()
	stopServers()
	saveState(began)
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)
543
def restart():
	"""Stop servers, save state, and re-exec this process in place."""
	began = time()
	stopServers()
	saveState(began)
	logging.getLogger('restart').info('Restarting...')
	try:
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
553
def restoreState():
	"""Load state saved by saveState(), tolerating older on-disk formats.

	NOTE: this unpickles a local state file; never point it at untrusted data.
	"""
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					ver = t[3]
					# TODO
				
				# Old format, from 2012-02-02 to 2012-02-03
				workLog = t[0]
				DupeShareHACK = t[1]
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
				
				# BUGFIX: guard against t being None (old dict-only format);
				# previously `t + 120` raised TypeError here and aborted the
				# whole restore with a spurious error.
				if t is not None and t + 120 >= time():
					workLog = pickle.load(f)
				else:
					logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
595
596
597 from jsonrpcserver import JSONRPCListener, JSONRPCServer
598 import interactivemode
599 from networkserver import NetworkListener
600 import threading
601 import sharelogging
602 import imp
603
if __name__ == "__main__":
	# --- Share logging setup (with backward-compatibility shims) ---
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		# Legacy DbOptions implies a postgres 'sql' share logger.
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'type': 'sql',
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			# Old (name, parameters) tuple form; convert to a dict config.
			name, parameters = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				name = 'sql'
				i = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			else:
				i = parameters
			i['type'] = name
		
		name = i['type']
		parameters = i
		try:
			# Load sharelogging/<name>.py, instantiate its class of the same
			# name, and collect its logShare callback.
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo.logShare)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name,  sys.exc_info())

	# --- Bitcoin p2p node listeners and upstream link ---
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	
	# These imports appear to register their JSON-RPC handlers as a side
	# effect of being imported — confirm against those modules.
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	# --- Miner-facing JSON-RPC server setup ---
	server = JSONRPCServer()
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	# Wire up the server's work/share callbacks defined above.
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	
	# --- Start worker threads and serve forever ---
	MM.start()
	
	restoreState()
	
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	server.serve_forever()