# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2013  Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import argparse
import importlib

argparser = argparse.ArgumentParser()
argparser.add_argument('-c', '--config', help='Config name to load from config_<ARG>.py')
args = argparser.parse_args()

configmod = 'config'
if args.config is not None:
	configmod = 'config_%s' % (args.config,)
config = importlib.import_module(configmod)
if not hasattr(config, 'ServerName'):
	config.ServerName = 'Unnamed Eloipool'

if not hasattr(config, 'ShareTarget'):
	config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
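# ShareTarget is the highest (easiest) hash target a share may meet and still be
# accepted; the default above, 2**224 - 1, corresponds roughly to difficulty 1.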
import logging
import logging.handlers

rootlogger = logging.getLogger(None)
logformat = getattr(config, 'LogFormat', '%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
logformatter = logging.Formatter(logformat)
if len(rootlogger.handlers) == 0:
	logging.basicConfig(
		format=logformat,
		level=logging.DEBUG,
	)
	# Loggers that would otherwise be too chatty at DEBUG level
	for infoOnly in (
		'Waker for JSONRPCServer',
		'Waker for StratumServer',
	):
		logging.getLogger(infoOnly).setLevel(logging.INFO)
if getattr(config, 'LogToSysLog', False):
	sysloghandler = logging.handlers.SysLogHandler(address = '/dev/log')
	rootlogger.addHandler(sysloghandler)
if hasattr(config, 'LogFile'):
	if isinstance(config.LogFile, str):
		filehandler = logging.FileHandler(config.LogFile)
	else:
		filehandler = logging.handlers.TimedRotatingFileHandler(**config.LogFile)
	filehandler.setFormatter(logformatter)
	rootlogger.addHandler(filehandler)
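# Note: config.LogFile may be either a plain filename (handled by logging.FileHandler
# above) or a dict of logging.handlers.TimedRotatingFileHandler keyword arguments
# such as filename, when, and backupCount.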
def RaiseRedFlags(reason):
	logging.getLogger('redflag').critical(reason)
from bitcoin.node import BitcoinLink, BitcoinNode
bcnode = BitcoinNode(config.UpstreamNetworkId)
bcnode.userAgent += b'Eloipool:0.1/'
bcnode.newBlock = lambda blkhash: MM.updateMerkleTree()
import jsonrpc.authproxy
jsonrpc.authproxy.USER_AGENT = 'Eloipool/0.1'
from bitcoin.script import BitcoinScript
from bitcoin.txn import Txn
from base58 import b58decode
from struct import pack
import subprocess
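# makeCoinbaseTxn() below builds the coinbase transaction for new work. If a
# CoinbaserCmd is configured, the command is run with %d replaced by the coinbase
# value; its stdout must give the number of extra outputs on the first line,
# followed by an amount line and an address line for each output. Whatever
# remains is paid to config.TrackerAddr.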
def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
	txn = Txn.new()
	
	if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
		coinbased = 0
		try:
			cmd = config.CoinbaserCmd
			cmd = cmd.replace('%d', str(coinbaseValue))
			p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
			nout = int(p.stdout.readline())
			for i in range(nout):
				amount = int(p.stdout.readline())
				addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
				pkScript = BitcoinScript.toAddress(addr)
				txn.addOutput(amount, pkScript)
				coinbased += amount
		except:
			# Force the failure branch below
			coinbased = coinbaseValue + 1
		if coinbased >= coinbaseValue:
			logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
			txn.outputs = []
		else:
			coinbaseValue -= coinbased
	
	pkScript = BitcoinScript.toAddress(config.TrackerAddr)
	txn.addOutput(coinbaseValue, pkScript)
	
	# TODO: red flag on dupe coinbase
	return txn
import jsonrpc_getwork
from util import Bits2Target
def updateBlocks():
	server.wakeLongpoll()
	stratumsrv.updateJob()

def blockChanged():
	global MM, networkTarget, server
	bits = MM.currentBlock[2]
	networkTarget = Bits2Target(bits)
	if MM.lastBlock != (None, None, None):
		global DupeShareHACK
		DupeShareHACK = {}
		jsonrpc_getwork._CheckForDupesHACK = {}
	server.wakeLongpoll(wantClear=True)
	stratumsrv.updateJob(wantClear=True)
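# blockChanged() runs when the network block changes: it recomputes the network
# target, clears the duplicate-share caches, and wakes longpollers and stratum
# clients with fresh (cleared) work. updateBlocks() only pushes updated work.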
import traceback
from time import sleep, time

def _WorkLogPruner_I(wl):
	now = time()
	pruned = 0
	for username in wl:
		userwork = wl[username]
		for wli in tuple(userwork.keys()):
			if now > userwork[wli][1] + 120:
				del userwork[wli]
				pruned += 1
	WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))

def WorkLogPruner(wl):
	while True:
		try:
			sleep(60)
			_WorkLogPruner_I(wl)
		except:
			WorkLogPruner.logger.error(traceback.format_exc())
WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
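# Work log entries are pruned roughly two minutes after they are issued, which
# matches the 120-second cutoffs used by IsJobValid() and checkShare() below.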
from merklemaker import merkleMaker
MM = merkleMaker()
MM.__dict__.update(config.__dict__)
MM.makeCoinbaseTxn = makeCoinbaseTxn
MM.onBlockChange = blockChanged
MM.onBlockUpdate = updateBlocks
from binascii import b2a_hex
from copy import deepcopy
from math import ceil, log
from merklemaker import MakeBlockHeader
from struct import pack, unpack
import threading
from time import time
from util import PendingUpstream, RejectedShare, bdiff1target, dblsha, LEhash2int, swap32, target2bdiff, target2pdiff

workLog = {}
userStatus = {}
DupeShareHACK = {}
loggersShare = []
RBDs = []
RBFs = []
gotwork = None
if hasattr(config, 'GotWorkURI'):
	gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)

if not hasattr(config, 'DelayLogForUpstream'):
	config.DelayLogForUpstream = False

if not hasattr(config, 'DynamicTargetting'):
	config.DynamicTargetting = 0
else:
	if not hasattr(config, 'DynamicTargetWindow'):
		config.DynamicTargetWindow = 120
	config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
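# DynamicTargetGoal is configured as a share rate per minute; the scaling above
# converts it into a share count per DynamicTargetWindow seconds.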
def submitGotwork(info):
	try:
		gotwork.gotwork(info)
	except:
		checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
def clampTarget(target, DTMode):
	# ShareTarget is the minimum
	if target is None or target > config.ShareTarget:
		target = config.ShareTarget
	
	# Never target above the network, as we'd lose blocks
	if target < networkTarget:
		target = networkTarget
	
	# (DTMode selects the quantisation mode; the constants here are assumed)
	if DTMode == 2:
		# Ceil target to a power of two :)
		truebits = log(target, 2)
		if target <= 2**int(truebits):
			# Workaround for bug in Python's math.log function
			truebits = int(truebits)
		target = 2**ceil(truebits) - 1
	elif DTMode == 3:
		# Round target to multiple of bdiff 1
		target = bdiff1target // int(round(target2bdiff(target)))
	
	# Return None for ShareTarget to save memory
	if target == config.ShareTarget:
		return None
	return target
def getTarget(username, now, DTMode = None, RequestedTarget = None):
	if DTMode is None:
		DTMode = config.DynamicTargetting
	if not DTMode:
		return None
	if username in userStatus:
		status = userStatus[username]
	else:
		# No record, use default target
		RequestedTarget = clampTarget(RequestedTarget, DTMode)
		userStatus[username] = [RequestedTarget, now, 0]
		return RequestedTarget
	(targetIn, lastUpdate, work) = status
	if work <= config.DynamicTargetGoal:
		if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
			# No reason to change it just yet
			return clampTarget(targetIn, DTMode)
		if not work:
			# No shares received, reset to minimum
			if targetIn:
				getTarget.logger.debug("No shares from %s, resetting to minimum target" % (repr(username),))
				userStatus[username] = [None, now, 0]
			return clampTarget(None, DTMode)
	
	deltaSec = now - lastUpdate
	target = targetIn or config.ShareTarget
	target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
	target = clampTarget(target, DTMode)
	if target != targetIn:
		pfx = 'Retargetting %s' % (repr(username),)
		tin = targetIn or config.ShareTarget
		getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
		tgt = target or config.ShareTarget
		getTarget.logger.debug("%s to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
	userStatus[username] = [target, now, 0]
	return target
getTarget.logger = logging.getLogger('getTarget')
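# Retargetting in getTarget() scales the miner's current target by
# (DynamicTargetGoal * elapsed / DynamicTargetWindow) / shares_submitted, so a
# miner submitting faster than the goal gets a lower (harder) target and vice
# versa; the result is then clamped via clampTarget().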
def TopTargets(n = 0x10):
	tmp = list(k for k, v in userStatus.items() if v[0] is not None)
	tmp.sort(key=lambda k: -userStatus[k][0])
	tmp2 = {}
	def t2d(t):
		if t not in tmp2:
			tmp2[t] = target2pdiff(t)
		return tmp2[t]
	for k in tmp[-n:]:
		tgt = userStatus[k][0]
		print('%-34s %064x %3d' % (k, tgt, t2d(tgt)))
def RegisterWork(username, wli, wld, RequestedTarget = None):
	now = time()
	target = getTarget(username, now, RequestedTarget=RequestedTarget)
	wld = tuple(wld) + (target,)
	workLog.setdefault(username, {})[wli] = (wld, now)
	return target or config.ShareTarget
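# workLog maps username -> { work-log id -> (work data, issue time) }. The id is
# the merkle root for getwork, an identifier embedded in the coinbase for block
# templates, and the job id (under the None key) for stratum jobs.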
def getBlockHeader(username):
	MRD = MM.getMRD()
	merkleRoot = MRD[0]
	hdr = MakeBlockHeader(MRD)
	workLog.setdefault(username, {})[merkleRoot] = (MRD, time())
	target = RegisterWork(username, merkleRoot, MRD)
	return (hdr, workLog[username][merkleRoot], target)
def getBlockTemplate(username, p_magic = None, RequestedTarget = None):
	if server.tls.wantClear:
		wantClear = True
	elif p_magic and username not in workLog:
		wantClear = True
		p_magic[0] = True
	else:
		wantClear = False
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	wliPos = coinbase[0] + 2
	wliLen = coinbase[wliPos - 1]
	wli = coinbase[wliPos:wliPos+wliLen]
	target = RegisterWork(username, wli, MC, RequestedTarget=RequestedTarget)
	return (MC, workLog[username][wli], target)
def getStratumJob(jobid, wantClear = False):
	MC = MM.getMC(wantClear)
	(dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
	now = time()
	workLog.setdefault(None, {})[jobid] = (MC, now)
	return (MC, workLog[None][jobid])

def getExistingStratumJob(jobid):
	wld = workLog[None][jobid]
	return (wld[0], wld)
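# Stratum jobs are shared by all miners, so they are logged under the None user;
# checkShare() looks the job up there via the 'jobid' field of the share.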
from bitcoin.varlen import varlenEncode, varlenDecode
import bitcoin.txn
from merklemaker import assembleBlock

if not hasattr(config, 'BlockSubmissions'):
	config.BlockSubmissions = None
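# blockSubmissionThread() below pushes a solved block to every upstream server,
# preferring the server the template came from; it tries the BIP 22 submitblock
# call first and falls back to the older getmemorypool submission forms.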
def blockSubmissionThread(payload, blkhash, share):
	if config.BlockSubmissions is None:
		servers = list(a for b in MM.TemplateSources for a in b)
	else:
		servers = list(config.BlockSubmissions)
	
	if hasattr(share['merkletree'], 'source_uri'):
		servers.insert(0, {
			'access': jsonrpc.ServiceProxy(share['merkletree'].source_uri),
			'name': share['merkletree'].source,
		})
	elif not servers:
		servers = list(a for b in MM.TemplateSources for a in b)
	
	myblock = (blkhash, payload[4:36])
	payload = b2a_hex(payload).decode('ascii')
	
	tries = 0
	success = False
	while len(servers):
		tries += 1
		TS = servers.pop(0)
		UpstreamBitcoindJSONRPC = TS['access']
		try:
			# BIP 22 standard submitblock
			reason = UpstreamBitcoindJSONRPC.submitblock(payload)
		except BaseException as gbterr:
			gbterr_fmt = traceback.format_exc()
			try:
				try:
					# bitcoind 0.5/0.6 getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload)
				except:
					# Old BIP 22 draft getmemorypool
					reason = UpstreamBitcoindJSONRPC.getmemorypool(payload, {})
				if reason is True:
					reason = None
				elif reason is False:
					reason = 'rejected'
			except BaseException as gmperr:
				now = time()
				# FIXME: This will show "Method not found" on pre-BIP22 servers
				RaiseRedFlags(gbterr_fmt)
				
				if MM.currentBlock[0] not in myblock and tries > len(servers):
					RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
					RaiseRedFlags('Giving up on submitting block to upstream \'%s\'' % (TS['name'],))
					if share['upstreamRejectReason'] is PendingUpstream:
						share['upstreamRejectReason'] = 'GAVE UP'
						share['upstreamResult'] = False
						logShare(share)
					return
				
				# Requeue this server and try again later
				servers.append(TS)
				continue
		
		# At this point, we have a reason back
		if reason:
			# FIXME: The returned value could be a list of multiple responses
			msg = 'Upstream \'%s\' block submission failed: %s' % (TS['name'], reason,)
			if success and reason in ('stale-prevblk', 'bad-prevblk', 'orphan', 'duplicate'):
				# A server that already has the block is not a problem
				blockSubmissionThread.logger.debug(msg)
			else:
				RBFs.append( (('upstream reject', reason, time()), payload, blkhash, share) )
				RaiseRedFlags(msg)
		else:
			blockSubmissionThread.logger.debug('Upstream \'%s\' accepted block' % (TS['name'],))
			success = True
		
		if share['upstreamRejectReason'] is PendingUpstream:
			share['upstreamRejectReason'] = reason
			share['upstreamResult'] = not reason
			logShare(share)
blockSubmissionThread.logger = logging.getLogger('blockSubmission')
def checkData(share):
	data = share['data'][:80]
	share['data'] = data
	
	(prevBlock, height, bits) = MM.currentBlock
	sharePrevBlock = data[4:36]
	if sharePrevBlock != prevBlock:
		if sharePrevBlock == MM.lastBlock[0]:
			raise RejectedShare('stale-prevblk')
		raise RejectedShare('bad-prevblk')
	
	if data[72:76] != bits:
		raise RejectedShare('bad-diffbits')
	
	# Note that we should accept miners reducing version to 1 if they don't understand 2 yet
	# FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
	if data[1:4] != b'\0\0\0' or data[0] > 2:
		raise RejectedShare('bad-version')
def buildStratumData(share, merkleroot):
	(prevBlock, height, bits) = MM.currentBlock
	
	data = b'\x02\0\0\0'
	data += prevBlock
	data += merkleroot
	data += share['ntime'][::-1]
	data += bits
	data += share['nonce'][::-1]
	
	share['data'] = data
	return data
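# buildStratumData() assembles the 80-byte block header for a stratum share:
# version, previous block hash, merkle root, ntime, nbits, and nonce, with the
# miner-supplied ntime and nonce byte-swapped into header byte order.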
def IsJobValid(wli, wluser = None):
	if wluser not in workLog:
		return False
	if wli not in workLog[wluser]:
		return False
	(wld, issueT) = workLog[wluser][wli]
	if time() < issueT - 120:
		return False
	return True
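# checkShare() validates an incoming share: it resolves the work it was issued
# against, rejects duplicates, stale or malformed work, checks the hash against
# the per-user target, and submits the block upstream if it meets the network
# target.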
def checkShare(share):
	shareTime = share['time'] = time()
	
	username = share['username']
	if 'data' in share:
		# Getwork and GBT shares carry the assembled block header
		checkData(share)
		data = share['data']
		
		if username not in workLog:
			raise RejectedShare('unknown-user')
		MWL = workLog[username]
		
		shareMerkleRoot = data[36:68]
		if 'blkdata' in share:
			pl = share['blkdata']
			(txncount, pl) = varlenDecode(pl)
			cbtxn = bitcoin.txn.Txn(pl)
			othertxndata = cbtxn.disassemble(retExtra=True)
			coinbase = cbtxn.getCoinbase()
			wliPos = coinbase[0] + 2
			wliLen = coinbase[wliPos - 1]
			wli = coinbase[wliPos:wliPos+wliLen]
			mode = 'MC'
			moden = 1
		else:
			wli = shareMerkleRoot
			mode = 'MRD'
			moden = 0
			coinbase = None
			othertxndata = b''
	else:
		# Stratum shares only carry the job id, extranonces, ntime and nonce
		MWL = workLog[None]
		wli = share['jobid']
		buildStratumData(share, b'\0' * 32)
		mode = 'MC'
		moden = 1
		coinbase = None
		othertxndata = b''
	
	if wli not in MWL:
		raise RejectedShare('unknown-work')
	(wld, issueT) = MWL[wli]
	share[mode] = wld
	
	share['issuetime'] = issueT
	
	(workMerkleTree, workCoinbase) = wld[1:3]
	share['merkletree'] = workMerkleTree
	if 'jobid' in share:
		cbtxn = deepcopy(workMerkleTree.data[0])
		coinbase = workCoinbase + share['extranonce1'] + share['extranonce2']
		cbtxn.setCoinbase(coinbase)
		cbtxn.assemble()
		data = buildStratumData(share, workMerkleTree.withFirst(cbtxn))
		shareMerkleRoot = data[36:68]
	
	if data in DupeShareHACK:
		raise RejectedShare('duplicate')
	DupeShareHACK[data] = None
	
	blkhash = dblsha(data)
	if blkhash[28:] != b'\0\0\0\0':
		raise RejectedShare('H-not-zero')
	blkhashn = LEhash2int(blkhash)
	
	logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
	logfunc('BLKHASH: %64x' % (blkhashn,))
	logfunc(' TARGET: %64x' % (networkTarget,))
	# NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
	txlist = workMerkleTree.data
	txlist = [deepcopy(txlist[0]),] + txlist[1:]
	cbtxn = txlist[0]
	cbtxn.setCoinbase(coinbase or workCoinbase)
	cbtxn.assemble()
	
	if blkhashn <= networkTarget:
		logfunc("Submitting upstream")
		RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
		if not moden:
			payload = assembleBlock(data, txlist)
		else:
			payload = share['data']
			if len(othertxndata):
				payload += share['blkdata']
			else:
				payload += assembleBlock(data, txlist)[80:]
		logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
		threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
		bcnode.submitBlock(payload)
		if config.DelayLogForUpstream:
			share['upstreamRejectReason'] = PendingUpstream
		else:
			share['upstreamRejectReason'] = None
			share['upstreamResult'] = True
		MM.updateBlock(blkhash)
	if gotwork and blkhashn <= config.GotWorkTarget:
		try:
			coinbaseMrkl = cbtxn.data
			coinbaseMrkl += blkhash
			steps = workMerkleTree._steps
			coinbaseMrkl += pack('B', len(steps))
			for step in steps:
				coinbaseMrkl += step
			coinbaseMrkl += b"\0\0\0\0"
			info = {}
			info['hash'] = b2a_hex(blkhash).decode('ascii')
			info['header'] = b2a_hex(data).decode('ascii')
			info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
			thr = threading.Thread(target=submitGotwork, args=(info,))
			thr.daemon = True
			thr.start()
		except:
			checkShare.logger.warning('Failed to build gotwork request')
	if 'target' in share:
		workTarget = share['target']
	else:
		workTarget = None
	
	if workTarget is None:
		workTarget = config.ShareTarget
	if blkhashn > workTarget:
		raise RejectedShare('high-hash')
	share['target'] = workTarget
	share['_targethex'] = '%064x' % (workTarget,)
	
	shareTimestamp = unpack('<L', data[68:72])[0]
	if shareTime < issueT - 120:
		raise RejectedShare('stale-work')
	if shareTimestamp < shareTime - 300:
		raise RejectedShare('time-too-old')
	if shareTimestamp > shareTime + 7200:
		raise RejectedShare('time-too-new')
	if config.DynamicTargetting and username in userStatus:
		# NOTE: userStatus[username] only doesn't exist across restarts
		status = userStatus[username]
		target = status[0] or config.ShareTarget
		if target == workTarget:
			userStatus[username][2] += 1
		else:
			userStatus[username][2] += float(target) / workTarget
	if moden:
		cbpre = workCoinbase
		cbpreLen = len(cbpre)
		if coinbase[:cbpreLen] != cbpre:
			raise RejectedShare('bad-cb-prefix')
		
		# Filter out known "I support" flags, to prevent exploits
		for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
			if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
				raise RejectedShare('bad-cb-flag')
		
		if len(coinbase) > 100:
			raise RejectedShare('bad-cb-length')
	
	if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
		raise RejectedShare('bad-txnmrklroot')
	
	if len(othertxndata):
		allowed = assembleBlock(data, txlist)[80:]
		if allowed != share['blkdata']:
			raise RejectedShare('bad-txns')
checkShare.logger = logging.getLogger('checkShare')
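# logShare() hands the finished share dict to every configured share logger; the
# 'solution' field is the submitted block header as hex.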
def logShare(share):
	if '_origdata' in share:
		share['solution'] = share['_origdata']
	else:
		share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
	for i in loggersShare:
		i.logShare(share)

def receiveShare(share):
	# TODO: username => userid
	try:
		checkShare(share)
	except RejectedShare as rej:
		share['rejectReason'] = str(rej)
		raise
	except BaseException as e:
		share['rejectReason'] = 'ERROR'
		raise
	finally:
		if share.get('upstreamRejectReason', None) is not PendingUpstream:
			logShare(share)
def newBlockNotification():
	logging.getLogger('newBlockNotification').info('Received new block notification')
	MM.updateMerkleTree()
	# TODO: Force RESPOND TO LONGPOLLS?
	return

def newBlockNotificationSIGNAL(signum, frame):
	# Use a new thread, in case the signal handler is called with locks held
	thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
	thr.daemon = True
	thr.start()

from signal import signal, SIGUSR1
signal(SIGUSR1, newBlockNotificationSIGNAL)
import os
import pickle
import signal
import sys
from time import sleep

SAVE_STATE_FILENAME = 'eloipool.worklog'

def stopServers():
	logger = logging.getLogger('stopServers')
	if hasattr(stopServers, 'already'):
		logger.debug('Already tried to stop servers before')
		return
	stopServers.already = True
	
	logger.info('Stopping servers...')
	global bcnode, server
	servers = (bcnode, server, stratumsrv)
	for s in servers:
		s.keepgoing = False
	for s in servers:
		try:
			s.wakeup()
		except:
			logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
	
	# Wait briefly for the server threads to wind down before closing their sockets
	i = 0
	while True:
		sl = []
		for s in servers:
			if s.running:
				sl.append(s.__class__.__name__)
		if not sl:
			break
		i += 1
		if i >= 0x100:
			logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
			break
		sleep(0.01)
	
	for s in servers:
		for fd in s._fd.keys():
			os.close(fd)

def stopLoggers():
	for i in loggersShare:
		if hasattr(i, 'stop'):
			i.stop()
def saveState(t = None):
	logger = logging.getLogger('saveState')
	
	# Then, save data needed to resume work
	logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
	try:
		with open(SAVE_STATE_FILENAME, 'wb') as f:
			pickle.dump(t, f)
			pickle.dump(DupeShareHACK, f)
			pickle.dump(workLog, f)
	except:
		logger.error('Failed to save work\n' + traceback.format_exc())
		try:
			os.unlink(SAVE_STATE_FILENAME)
		except:
			logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
def exit():
	stopServers()
	stopLoggers()
	saveState(t=time())
	logging.getLogger('exit').info('Goodbye...')
	os.kill(os.getpid(), signal.SIGTERM)
	sys.exit(0)

def restart():
	stopServers()
	stopLoggers()
	saveState(t=time())
	logging.getLogger('restart').info('Restarting...')
	try:
		os.execv(sys.argv[0], sys.argv)
	except:
		logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
def restoreState():
	if not os.path.exists(SAVE_STATE_FILENAME):
		return
	
	global workLog, DupeShareHACK
	
	logger = logging.getLogger('restoreState')
	s = os.stat(SAVE_STATE_FILENAME)
	logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
	try:
		with open(SAVE_STATE_FILENAME, 'rb') as f:
			t = pickle.load(f)
			if type(t) == tuple:
				if len(t) > 2:
					# Future formats, not supported here
					raise ValueError('Unsupported saved-state format')
				# Old format, from 2012-02-02 to 2012-02-03
				(workLog, DupeShareHACK) = t
				t = None
			else:
				if isinstance(t, dict):
					# Old format, from 2012-02-03 to 2012-02-03
					DupeShareHACK = t
					t = None
				else:
					# Current format, from 2012-02-03 onward
					DupeShareHACK = pickle.load(f)
				
				if t and t + 120 >= time():
					workLog = pickle.load(f)
				else:
					logger.debug('Skipping restore of expired workLog')
	except:
		logger.error('Failed to restore state\n' + traceback.format_exc())
		return
	logger.info('State restored successfully')
	if t:
		logger.info('Total downtime: %g seconds' % (time() - t,))
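# The saved state is a simple pickle stream: a timestamp (or an older-format
# object), the duplicate-share cache, and the work log; the work log is only
# restored if the pool was down for less than about two minutes.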
from jsonrpcserver import JSONRPCListener, JSONRPCServer
import interactivemode
from networkserver import NetworkListener
import sharelogging
import imp
from stratumserver import StratumServer
if __name__ == "__main__":
	if not hasattr(config, 'ShareLogging'):
		config.ShareLogging = ()
	if hasattr(config, 'DbOptions'):
		logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
		config.ShareLogging = list(config.ShareLogging)
		config.ShareLogging.append( {
			'engine': 'postgres',
			'dbopts': config.DbOptions,
			'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
		} )
	for i in config.ShareLogging:
		if not hasattr(i, 'keys'):
			(name, parameters) = i
			logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
			if name == 'postgres':
				i = {
					'engine': 'postgres',
					'dbopts': parameters,
				}
			elif name == 'logfile':
				i = {'engine': 'logfile'}
				i['thropts'] = parameters
				if 'filename' in parameters:
					i['filename'] = parameters['filename']
					i['thropts'] = dict(i['thropts'])
					del i['thropts']['filename']
			else:
				i = {'engine': name, 'parameters': parameters}
		
		name = i['engine']
		parameters = dict(i)
		del parameters['engine']
		try:
			fp, pathname, description = imp.find_module(name, sharelogging.__path__)
			m = imp.load_module(name, fp, pathname, description)
			lo = getattr(m, name)(**parameters)
			loggersShare.append(lo)
		except:
			logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name, sys.exc_info())
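	# Each ShareLogging entry names a module in the sharelogging package via its
	# 'engine' key and (roughly) passes the rest of the entry to that module's
	# constructor when the logger is instantiated above.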
	LSbc = []
	if not hasattr(config, 'BitcoinNodeAddresses'):
		config.BitcoinNodeAddresses = ()
	for a in config.BitcoinNodeAddresses:
		LSbc.append(NetworkListener(bcnode, a))
	
	if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
		BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
	import jsonrpc_getblocktemplate
	import jsonrpc_getwork
	import jsonrpc_setworkaux
	
	server = JSONRPCServer()
	server.tls = threading.local()
	server.tls.wantClear = False
	if hasattr(config, 'JSONRPCAddress'):
		logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
		if not hasattr(config, 'JSONRPCAddresses'):
			config.JSONRPCAddresses = []
		config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
	LS = []
	for a in config.JSONRPCAddresses:
		LS.append(JSONRPCListener(server, a))
	if hasattr(config, 'SecretUser'):
		server.SecretUser = config.SecretUser
	server.aux = MM.CoinbaseAux
	server.getBlockHeader = getBlockHeader
	server.getBlockTemplate = getBlockTemplate
	server.receiveShare = receiveShare
	server.RaiseRedFlags = RaiseRedFlags
	server.ShareTarget = config.ShareTarget
	
	if hasattr(config, 'TrustedForwarders'):
		server.TrustedForwarders = config.TrustedForwarders
	server.ServerName = config.ServerName
	stratumsrv = StratumServer()
	stratumsrv.getStratumJob = getStratumJob
	stratumsrv.getExistingStratumJob = getExistingStratumJob
	stratumsrv.receiveShare = receiveShare
	stratumsrv.getTarget = getTarget
	stratumsrv.defaultTarget = config.ShareTarget
	stratumsrv.IsJobValid = IsJobValid
	if not hasattr(config, 'StratumAddresses'):
		config.StratumAddresses = ()
	for a in config.StratumAddresses:
		NetworkListener(stratumsrv, a)
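	# The work-log pruner, the bitcoin P2P node, and the stratum server each run
	# in daemon threads below; the JSON-RPC server runs in the main thread.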
	restoreState()
	
	MM.start()
	
	prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
	prune_thr.daemon = True
	prune_thr.start()
	
	bcnode_thr = threading.Thread(target=bcnode.serve_forever)
	bcnode_thr.daemon = True
	bcnode_thr.start()
	
	stratum_thr = threading.Thread(target=stratumsrv.serve_forever)
	stratum_thr.daemon = True
	stratum_thr.start()
	
	server.serve_forever()