2 # Eloipool - Python Bitcoin pool server
3 # Copyright (C) 2011-2013 Luke Dashjr <luke-jr+eloipool@utopios.org>
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as
7 # published by the Free Software Foundation, either version 3 of the
8 # License, or (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Affero General Public License for more details.
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
# -- Configuration bootstrap --
# Parse -c/--config, import the matching config_<name>.py module, then
# backfill defaults the config may omit (ServerName, ShareTarget).
# NOTE(review): this chunk is mangled -- original line numbers are embedded
# in each line, indentation is lost, and lines are missing where the
# embedded numbering jumps. Code lines are kept byte-identical.
20 argparser = argparse.ArgumentParser()
21 argparser.add_argument('-c', '--config', help='Config name to load from config_<ARG>.py')
22 args = argparser.parse_args()
# Only override the module name when --config was actually given.
24 if not args.config is None:
25 configmod = 'config_%s' % (args.config,)
# NOTE(review): the default assignment of configmod (no --config case) is at
# a missing line (23) -- confirm against upstream before relying on it.
26 config = importlib.import_module(configmod)
# Defaults are written directly onto the imported config module object.
28 if not hasattr(config, 'ServerName'):
29 config.ServerName = 'Unnamed Eloipool'
# ShareTarget default: pdiff-1 style maximum target (minimum difficulty).
31 if not hasattr(config, 'ShareTarget'):
32 config.ShareTarget = 0x00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff
# -- Logging setup --
# Configure the root logger from config: optional syslog handler, and an
# optional file handler (plain FileHandler when LogFile is a str, otherwise
# TimedRotatingFileHandler(**LogFile) for a dict of keyword arguments).
# NOTE(review): lines 42-52 and 55-56 are missing here (default handler
# setup and the start of the infoOnly logger-name list).
36 import logging.handlers
38 rootlogger = logging.getLogger(None)
39 logformat = getattr(config, 'LogFormat', '%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
40 logformatter = logging.Formatter(logformat)
41 if len(rootlogger.handlers) == 0:
# Chatty per-server "waker" loggers are capped at INFO below.
53 'Waker for JSONRPCServer',
54 'Waker for StratumServer',
57 logging.getLogger(infoOnly).setLevel(logging.INFO)
58 if getattr(config, 'LogToSysLog', False):
59 sysloghandler = logging.handlers.SysLogHandler(address = '/dev/log')
60 rootlogger.addHandler(sysloghandler)
61 if hasattr(config, 'LogFile'):
62 if isinstance(config.LogFile, str):
63 filehandler = logging.FileHandler(config.LogFile)
# else-branch (header at missing line 64): LogFile is a kwargs dict.
65 filehandler = logging.handlers.TimedRotatingFileHandler(**config.LogFile)
66 filehandler.setFormatter(logformatter)
67 rootlogger.addHandler(filehandler)
# Surface a serious pool-wide problem on the dedicated 'redflag' logger.
# NOTE(review): lines 71-73 are missing; upstream Eloipool also returns the
# reason here -- confirm before relying on a return value.
69 def RaiseRedFlags(reason):
70 logging.getLogger('redflag').critical(reason)
# -- Bitcoin P2P node setup --
# bcnode speaks the Bitcoin network protocol to upstream nodes; a new block
# seen on the network triggers a merkle-tree refresh (MM is bound later).
74 from bitcoin.node import BitcoinLink, BitcoinNode
75 bcnode = BitcoinNode(config.UpstreamNetworkId)
76 bcnode.userAgent += b'Eloipool:0.1/'
# Callback invoked by the node when it learns of a new block.
77 bcnode.newBlock = lambda blkhash: MM.updateMerkleTree()
# Identify ourselves in outgoing JSON-RPC requests.
82 import jsonrpc.authproxy
83 jsonrpc.authproxy.USER_AGENT = 'Eloipool/0.1'
88 from bitcoin.script import BitcoinScript
89 from bitcoin.txn import Txn
90 from base58 import b58decode
91 from struct import pack
# Build the coinbase transaction paying out coinbaseValue.
# If config.CoinbaserCmd is set (and useCoinbaser), it is run as a shell
# command with %d replaced by the value; its stdout is a count line followed
# by amount/address line pairs, each added as an output. Whatever value the
# coinbaser did not allocate goes to config.TrackerAddr.
# NOTE(review): several lines are missing from this chunk (txn construction,
# the try/except around the coinbaser, and the success path recording the
# allocated total) -- do not rewrite blind.
# SECURITY: CoinbaserCmd runs with shell=True; it is operator-supplied
# config and must be fully trusted.
95 def makeCoinbaseTxn(coinbaseValue, useCoinbaser = True):
98 if useCoinbaser and hasattr(config, 'CoinbaserCmd') and config.CoinbaserCmd:
101 cmd = config.CoinbaserCmd
102 cmd = cmd.replace('%d', str(coinbaseValue))
103 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
# First stdout line: number of payout entries that follow.
104 nout = int(p.stdout.readline())
105 for i in range(nout):
106 amount = int(p.stdout.readline())
107 addr = p.stdout.readline().rstrip(b'\n').decode('utf8')
108 pkScript = BitcoinScript.toAddress(addr)
109 txn.addOutput(amount, pkScript)
# Failure sentinel: coinbased > coinbaseValue marks coinbaser failure
# (presumably assigned in a missing except branch -- confirm upstream);
# on failure nothing is deducted from the tracker payout.
112 coinbased = coinbaseValue + 1
113 if coinbased >= coinbaseValue:
114 logging.getLogger('makeCoinbaseTxn').error('Coinbaser failed!')
117 coinbaseValue -= coinbased
# Remaining value is paid to the pool's tracker address.
119 pkScript = BitcoinScript.toAddress(config.TrackerAddr)
120 txn.addOutput(coinbaseValue, pkScript)
123 # TODO: red flag on dupe coinbase
127 import jsonrpc_getwork
128 from util import Bits2Target
# Fragments of updateBlocks() (def at missing line 137) and blockChanged()
# (def at missing line 141): push fresh work to longpoll/stratum clients,
# and recompute the network target when the chain tip changes.
138 server.wakeLongpoll()
139 stratumsrv.updateJob()
142 global MM, networkTarget, server
143 bits = MM.currentBlock[2]
# Expand compact 'bits' into the full 256-bit network target.
147 networkTarget = Bits2Target(bits)
148 if MM.lastBlock != (None, None, None):
# Real block change: reset the getwork duplicate-check cache.
151 jsonrpc_getwork._CheckForDupesHACK = {}
# wantClear forces clients to abandon old work entirely.
153 server.wakeLongpoll(wantClear=True)
154 stratumsrv.updateJob(wantClear=True)
157 from time import sleep, time
# Work-log pruning: drop entries older than 120 seconds past issue time.
# _WorkLogPruner_I does a single pass; WorkLogPruner wraps it in a periodic
# loop with error logging (loop/sleep and deletion lines are missing here).
160 def _WorkLogPruner_I(wl):
164 userwork = wl[username]
# Snapshot the keys since entries are deleted during the scan.
165 for wli in tuple(userwork.keys()):
# userwork[wli] is (work-data, issue-time); prune 120s after issue.
166 if now > userwork[wli][1] + 120:
169 WorkLogPruner.logger.debug('Pruned %d jobs' % (pruned,))
171 def WorkLogPruner(wl):
177 WorkLogPruner.logger.error(traceback.format_exc())
178 WorkLogPruner.logger = logging.getLogger('WorkLogPruner')
# -- Merkle maker wiring --
# NOTE(review): 'MM = merkleMaker()' (line 182) is missing from this chunk.
# MM absorbs every config setting and calls back into this module on
# coinbase construction and block events.
181 from merklemaker import merkleMaker
183 MM.__dict__.update(config.__dict__)
184 MM.makeCoinbaseTxn = makeCoinbaseTxn
185 MM.onBlockChange = blockChanged
186 MM.onBlockUpdate = updateBlocks
189 from binascii import b2a_hex
190 from copy import deepcopy
191 from math import ceil, log
192 from merklemaker import MakeBlockHeader
193 from struct import pack, unpack
195 from time import time
196 from util import PendingUpstream, RejectedShare, bdiff1target, dblsha, LEhash2int, swap32, target2bdiff, target2pdiff
# -- Optional-config defaults --
# gotwork: proxy for reporting near-block solutions (the else branch setting
# gotwork = None is at a missing line -- confirm upstream).
201 if hasattr(config, 'GotWorkURI'):
202 gotwork = jsonrpc.ServiceProxy(config.GotWorkURI)
204 if not hasattr(config, 'DelayLogForUpstream'):
205 config.DelayLogForUpstream = False
# DynamicTargetting 0 disables per-user retargetting.
207 if not hasattr(config, 'DynamicTargetting'):
208 config.DynamicTargetting = 0
210 if not hasattr(config, 'DynamicTargetWindow'):
211 config.DynamicTargetWindow = 120
# Goal is configured in shares/minute; scale it to the retarget window.
212 config.DynamicTargetGoal *= config.DynamicTargetWindow / 60
# Send solution info to the configured gotwork service; failures are only
# logged (the try/except header lines are missing from this chunk).
214 def submitGotwork(info):
216 gotwork.gotwork(info)
218 checkShare.logger.warning('Failed to submit gotwork\n' + traceback.format_exc())
# Clamp a proposed share target into [networkTarget, config.ShareTarget],
# then quantize it according to DTMode. The DTMode branch headers (lines
# 224, 228-229, 236) are missing; the visible bodies suggest one mode ceils
# to a power of two and another rounds to a bdiff-1 multiple.
220 def clampTarget(target, DTMode):
221 # ShareTarget is the minimum
222 if target is None or target > config.ShareTarget:
223 target = config.ShareTarget
225 # Never target above the network, as we'd lose blocks
226 if target < networkTarget:
227 target = networkTarget
230 # Ceil target to a power of two :)
231 truebits = log(target, 2)
232 if target <= 2**int(truebits):
233 # Workaround for bug in Python's math.log function
234 truebits = int(truebits)
235 target = 2**ceil(truebits) - 1
237 # Round target to multiple of bdiff 1
# NOTE(review): '/' is float division in Python 3; an integer target (//)
# looks intended here -- confirm against upstream.
238 target = bdiff1target / int(round(target2bdiff(target)))
240 # Return None for ShareTarget to save memory
241 if target == config.ShareTarget:
# NOTE(review): the 'return None' / 'return target' lines are missing.
# Per-user dynamic share target. userStatus[username] is a 3-item list:
# [current target or None, time of last retarget, work credited since].
# Aims for DynamicTargetGoal shares per DynamicTargetWindow seconds.
245 def getTarget(username, now, DTMode = None, RequestedTarget = None):
247 DTMode = config.DynamicTargetting
250 if username in userStatus:
251 status = userStatus[username]
253 # No record, use default target
254 RequestedTarget = clampTarget(RequestedTarget, DTMode)
255 userStatus[username] = [RequestedTarget, now, 0]
256 return RequestedTarget
257 (targetIn, lastUpdate, work) = status
258 if work <= config.DynamicTargetGoal:
# Under-goal inside the window: keep the current target.
259 if now < lastUpdate + config.DynamicTargetWindow and (targetIn is None or targetIn >= networkTarget):
260 # No reason to change it just yet
261 return clampTarget(targetIn, DTMode)
263 # No shares received, reset to minimum
265 getTarget.logger.debug("No shares from %s, resetting to minimum target" % (repr(username),))
266 userStatus[username] = [None, now, 0]
267 return clampTarget(None, DTMode)
# Over goal: scale the target by (goal rate / observed rate).
269 deltaSec = now - lastUpdate
270 target = targetIn or config.ShareTarget
271 target = int(target * config.DynamicTargetGoal * deltaSec / config.DynamicTargetWindow / work)
272 target = clampTarget(target, DTMode)
273 if target != targetIn:
274 pfx = 'Retargetting %s' % (repr(username),)
275 tin = targetIn or config.ShareTarget
276 getTarget.logger.debug("%s from: %064x (pdiff %s)" % (pfx, tin, target2pdiff(tin)))
277 tgt = target or config.ShareTarget
278 getTarget.logger.debug("%s to: %064x (pdiff %s)" % (pfx, tgt, target2pdiff(tgt)))
279 userStatus[username] = [target, now, 0]
# NOTE(review): the final 'return target' (line 280) is missing here.
281 getTarget.logger = logging.getLogger('getTarget')
# Console/debug helper: print the n users with the highest difficulty
# (numerically lowest target). Lines 286-288, 290-291 and 294 are missing,
# including the tmp2 cache and t2d helper setup.
283 def TopTargets(n = 0x10):
284 tmp = list(k for k, v in userStatus.items() if not v[0] is None)
# Sort descending by target so the hardest (smallest) targets come last.
285 tmp.sort(key=lambda k: -userStatus[k][0])
289 tmp2[t] = target2pdiff(t)
292 tgt = userStatus[k][0]
293 print('%-34s %064x %3d' % (k, tgt, t2d(tgt)))
# Record issued work in workLog, keyed by user then work-log id (wli), with
# the effective target appended to the work data.
# NOTE(review): 'now = time()' (line 296) is missing from this chunk.
295 def RegisterWork(username, wli, wld, RequestedTarget = None):
297 target = getTarget(username, now, RequestedTarget=RequestedTarget)
298 wld = tuple(wld) + (target,)
299 workLog.setdefault(username, {})[wli] = (wld, now)
# getTarget returns None for the default; give callers the concrete value.
300 return target or config.ShareTarget
# getwork-style interface: hand out a ready-to-mine block header.
# NOTE(review): lines 303-304 (MRD/merkleRoot assignment, presumably from
# MM.getMRD()) are missing. The direct workLog store at line 306 appears to
# be immediately overwritten by RegisterWork -- confirm upstream intent.
302 def getBlockHeader(username):
305 hdr = MakeBlockHeader(MRD)
306 workLog.setdefault(username, {})[merkleRoot] = (MRD, time())
307 target = RegisterWork(username, merkleRoot, MRD)
308 return (hdr, workLog[username][merkleRoot], target)
# getblocktemplate-style interface. The work-log id (wli) is embedded in the
# coinbase script; p_magic lets a first-time user force a clean template
# (branch bodies at missing lines 312-317, including wantClear assignment).
310 def getBlockTemplate(username, p_magic = None, RequestedTarget = None):
311 if server.tls.wantClear:
313 elif p_magic and username not in workLog:
318 MC = MM.getMC(wantClear)
319 (dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
# wli is a length-prefixed field in the coinbase script: coinbase[0] is the
# height-push length, so the id's length byte sits at that offset + 1.
320 wliPos = coinbase[0] + 2
321 wliLen = coinbase[wliPos - 1]
322 wli = coinbase[wliPos:wliPos+wliLen]
323 target = RegisterWork(username, wli, MC, RequestedTarget=RequestedTarget)
324 return (MC, workLog[username][wli], target)
# Stratum job issuance: stratum work is logged under the None user, keyed
# by jobid. NOTE(review): 'now = time()' (line 329) is missing here.
326 def getStratumJob(jobid, wantClear = False):
327 MC = MM.getMC(wantClear)
328 (dummy, merkleTree, coinbase, prevBlock, bits) = MC[:5]
330 workLog.setdefault(None, {})[jobid] = (MC, now)
331 return (MC, workLog[None][jobid])
# Look up a previously-issued stratum job by jobid.
# NOTE(review): the rest of the function (lines 335+) is missing here.
333 def getExistingStratumJob(jobid):
334 wld = workLog[None][jobid]
342 from bitcoin.varlen import varlenEncode, varlenDecode
344 from merklemaker import assembleBlock
# BlockSubmissions None means "submit solved blocks to every template
# source" (see blockSubmissionThread).
346 if not hasattr(config, 'BlockSubmissions'):
347 config.BlockSubmissions = None
# Submit a solved block to upstream bitcoind(s), retrying across servers and
# falling back from BIP 22 submitblock to the older getmemorypool variants.
# Fills share['upstreamRejectReason'/'upstreamResult'] when logging was
# deferred with PendingUpstream. Many loop/try headers and else branches
# are missing from this chunk; code lines kept byte-identical.
350 def blockSubmissionThread(payload, blkhash, share):
351 if config.BlockSubmissions is None:
# Default: every template source is a submission target.
352 servers = list(a for b in MM.TemplateSources for a in b)
354 servers = list(config.BlockSubmissions)
# Prefer the server the winning template actually came from, if known.
356 if hasattr(share['merkletree'], 'source_uri'):
358 'access': jsonrpc.ServiceProxy(share['merkletree'].source_uri),
359 'name': share['merkletree'].source,
362 servers = list(a for b in MM.TemplateSources for a in b)
# (hash, prevblock-bytes) pair used to notice when the network moved on.
364 myblock = (blkhash, payload[4:36])
365 payload = b2a_hex(payload).decode('ascii')
372 UpstreamBitcoindJSONRPC = TS['access']
374 # BIP 22 standard submitblock
375 reason = UpstreamBitcoindJSONRPC.submitblock(payload)
376 except BaseException as gbterr:
377 gbterr_fmt = traceback.format_exc()
380 # bitcoind 0.5/0.6 getmemorypool
381 reason = UpstreamBitcoindJSONRPC.getmemorypool(payload)
383 # Old BIP 22 draft getmemorypool
384 reason = UpstreamBitcoindJSONRPC.getmemorypool(payload, {})
# getmemorypool returns True/False rather than a reason string; the
# surrounding normalization branches are at missing lines.
387 elif reason is False:
389 except BaseException as gmperr:
392 # FIXME: This will show "Method not found" on pre-BIP22 servers
393 RaiseRedFlags(gbterr_fmt)
# Give up only once all servers were tried and the chain tip has moved on.
395 if MM.currentBlock[0] not in myblock and tries > len(servers):
396 RBFs.append( (('next block', MM.currentBlock, now, (gbterr, gmperr)), payload, blkhash, share) )
397 RaiseRedFlags('Giving up on submitting block to upstream \'%s\'' % (TS['name'],))
398 if share['upstreamRejectReason'] is PendingUpstream:
399 share['upstreamRejectReason'] = 'GAVE UP'
400 share['upstreamResult'] = False
407 # At this point, we have a reason back
409 # FIXME: The returned value could be a list of multiple responses
410 msg = 'Upstream \'%s\' block submission failed: %s' % (TS['name'], reason,)
# Expected stale/duplicate rejections after a prior success only log debug.
411 if success and reason in ('stale-prevblk', 'bad-prevblk', 'orphan', 'duplicate'):
413 blockSubmissionThread.logger.debug(msg)
415 RBFs.append( (('upstream reject', reason, time()), payload, blkhash, share) )
418 blockSubmissionThread.logger.debug('Upstream \'%s\' accepted block' % (TS['name'],))
420 if share['upstreamRejectReason'] is PendingUpstream:
421 share['upstreamRejectReason'] = reason
# Empty/None reason means the block was accepted.
422 share['upstreamResult'] = not reason
424 blockSubmissionThread.logger = logging.getLogger('blockSubmission')
# Sanity-check a share's 80-byte header against current work: previous
# block, compact bits, and version. Raises RejectedShare on mismatch.
# NOTE(review): the 'data = share[...]' assignment is at a missing line.
426 def checkData(share):
429 (prevBlock, height, bits) = MM.currentBlock
430 sharePrevBlock = data[4:36]
431 if sharePrevBlock != prevBlock:
# Distinguish work for the immediately-previous block from garbage.
432 if sharePrevBlock == MM.lastBlock[0]:
433 raise RejectedShare('stale-prevblk')
434 raise RejectedShare('bad-prevblk')
436 if data[72:76] != bits:
437 raise RejectedShare('bad-diffbits')
439 # Note that we should accept miners reducing version to 1 if they don't understand 2 yet
440 # FIXME: When the supermajority is upgraded to version 2, stop accepting 1!
441 if data[1:4] != b'\0\0\0' or data[0] > 2:
442 raise RejectedShare('bad-version')
# Assemble an 80-byte header for a stratum share from current work plus the
# miner-supplied merkle root, ntime, and nonce. The version/prevBlock/bits
# appends and the final 'share['data'] = data; return data' lines are
# missing from this chunk.
444 def buildStratumData(share, merkleroot):
445 (prevBlock, height, bits) = MM.currentBlock
# ntime/nonce arrive big-endian from stratum; the header wants little-endian.
450 data += share['ntime'][::-1]
452 data += share['nonce'][::-1]
# Is a work-log id still valid for this user? Jobs are pruned 120 seconds
# after issue (see WorkLogPruner). The 'return False/True' lines are at
# missing lines (459, 461, 464-465).
# NOTE(review): 'time() < issueT - 120' reads as "issued in the future";
# confirm the intended staleness direction against upstream.
457 def IsJobValid(wli, wluser = None):
458 if wluser not in workLog:
460 if wli not in workLog[wluser]:
462 (wld, issueT) = workLog[wluser][wli]
463 if time() < issueT - 120:
# Validate a submitted share end-to-end: resolve its work-log entry, reject
# duplicates, verify proof-of-work, submit upstream when it solves a block,
# enforce the share target and timestamp windows, credit dynamic-target
# accounting, and vet the coinbase for getblocktemplate shares. Raises
# RejectedShare with a BIP 22-style reason string on any failure.
# Numerous branch-header lines are missing from this chunk; code lines are
# kept byte-identical.
467 def checkShare(share):
468 shareTime = share['time'] = time()
470 username = share['username']
476 if username not in workLog:
477 raise RejectedShare('unknown-user')
478 MWL = workLog[username]
480 shareMerkleRoot = data[36:68]
# getblocktemplate path: miner sends full block data; recover the work-log
# id from the coinbase script (same layout as getBlockTemplate writes).
481 if 'blkdata' in share:
482 pl = share['blkdata']
483 (txncount, pl) = varlenDecode(pl)
484 cbtxn = bitcoin.txn.Txn(pl)
485 othertxndata = cbtxn.disassemble(retExtra=True)
486 coinbase = cbtxn.getCoinbase()
487 wliPos = coinbase[0] + 2
488 wliLen = coinbase[wliPos - 1]
489 wli = coinbase[wliPos:wliPos+wliLen]
# getwork path: the merkle root itself is the work-log id.
493 wli = shareMerkleRoot
# Stratum path bootstrap (surrounding branch headers missing).
501 buildStratumData(share, b'\0' * 32)
507 raise RejectedShare('unknown-work')
508 (wld, issueT) = MWL[wli]
511 share['issuetime'] = issueT
513 (workMerkleTree, workCoinbase) = wld[1:3]
514 share['merkletree'] = workMerkleTree
# Stratum: rebuild the coinbase from extranonces and derive the header.
516 cbtxn = deepcopy(workMerkleTree.data[0])
517 coinbase = workCoinbase + share['extranonce1'] + share['extranonce2']
518 cbtxn.setCoinbase(coinbase)
520 data = buildStratumData(share, workMerkleTree.withFirst(cbtxn))
521 shareMerkleRoot = data[36:68]
523 if data in DupeShareHACK:
524 raise RejectedShare('duplicate')
525 DupeShareHACK[data] = None
527 blkhash = dblsha(data)
# Cheapest PoW check: top 32 bits of the (LE) hash must be zero.
528 if blkhash[28:] != b'\0\0\0\0':
529 raise RejectedShare('H-not-zero')
530 blkhashn = LEhash2int(blkhash)
# Block candidates log at info; ordinary shares at debug.
533 logfunc = getattr(checkShare.logger, 'info' if blkhashn <= networkTarget else 'debug')
534 logfunc('BLKHASH: %64x' % (blkhashn,))
535 logfunc(' TARGET: %64x' % (networkTarget,))
537 # NOTE: this isn't actually needed for MC mode, but we're abusing it for a trivial share check...
538 txlist = workMerkleTree.data
# Copy the coinbase txn so mutating it can't corrupt the shared template.
539 txlist = [deepcopy(txlist[0]),] + txlist[1:]
541 cbtxn.setCoinbase(coinbase or workCoinbase)
# Block solved: record it, assemble the full payload, and submit upstream
# via JSON-RPC (on a thread) and the P2P node.
544 if blkhashn <= networkTarget:
545 logfunc("Submitting upstream")
546 RBDs.append( deepcopy( (data, txlist, share.get('blkdata', None), workMerkleTree, share, wld) ) )
548 payload = assembleBlock(data, txlist)
# Alternate path (branch header missing): reuse miner-provided block data.
550 payload = share['data']
551 if len(othertxndata):
552 payload += share['blkdata']
554 payload += assembleBlock(data, txlist)[80:]
555 logfunc('Real block payload: %s' % (b2a_hex(payload).decode('utf8'),))
557 threading.Thread(target=blockSubmissionThread, args=(payload, blkhash, share)).start()
558 bcnode.submitBlock(payload)
559 if config.DelayLogForUpstream:
# Defer share logging until the upstream submission resolves.
560 share['upstreamRejectReason'] = PendingUpstream
562 share['upstreamRejectReason'] = None
563 share['upstreamResult'] = True
564 MM.updateBlock(blkhash)
# Near-block notification to the optional gotwork service.
567 if gotwork and blkhashn <= config.GotWorkTarget:
569 coinbaseMrkl = cbtxn.data
570 coinbaseMrkl += blkhash
571 steps = workMerkleTree._steps
572 coinbaseMrkl += pack('B', len(steps))
575 coinbaseMrkl += b"\0\0\0\0"
577 info['hash'] = b2a_hex(blkhash).decode('ascii')
578 info['header'] = b2a_hex(data).decode('ascii')
579 info['coinbaseMrkl'] = b2a_hex(coinbaseMrkl).decode('ascii')
580 thr = threading.Thread(target=submitGotwork, args=(info,))
584 checkShare.logger.warning('Failed to build gotwork request')
# Enforce the per-share target: from the share itself, the work-log entry
# (missing lines), or the pool default.
586 if 'target' in share:
587 workTarget = share['target']
593 if workTarget is None:
594 workTarget = config.ShareTarget
595 if blkhashn > workTarget:
596 raise RejectedShare('high-hash')
597 share['target'] = workTarget
598 share['_targethex'] = '%064x' % (workTarget,)
600 shareTimestamp = unpack('<L', data[68:72])[0]
601 if shareTime < issueT - 120:
602 raise RejectedShare('stale-work')
# Header timestamp must be within [-300s, +7200s] of receipt.
603 if shareTimestamp < shareTime - 300:
604 raise RejectedShare('time-too-old')
605 if shareTimestamp > shareTime + 7200:
606 raise RejectedShare('time-too-new')
# Credit dynamic-target accounting, scaling when the share was mined at a
# different target than currently assigned.
608 if config.DynamicTargetting and username in userStatus:
609 # NOTE: userStatus[username] only doesn't exist across restarts
610 status = userStatus[username]
611 target = status[0] or config.ShareTarget
612 if target == workTarget:
613 userStatus[username][2] += 1
615 userStatus[username][2] += float(target) / workTarget
# GBT-only coinbase vetting (the cbpre assignment is at a missing line).
619 cbpreLen = len(cbpre)
620 if coinbase[:cbpreLen] != cbpre:
621 raise RejectedShare('bad-cb-prefix')
623 # Filter out known "I support" flags, to prevent exploits
624 for ff in (b'/P2SH/', b'NOP2SH', b'p2sh/CHV', b'p2sh/NOCHV'):
# Flags are only legitimate inside the pool-controlled prefix region.
625 if coinbase.find(ff) > max(-1, cbpreLen - len(ff)):
626 raise RejectedShare('bad-cb-flag')
628 if len(coinbase) > 100:
629 raise RejectedShare('bad-cb-length')
631 if shareMerkleRoot != workMerkleTree.withFirst(cbtxn):
632 raise RejectedShare('bad-txnmrklroot')
# Miner-supplied non-coinbase txn data must match the template exactly.
634 if len(othertxndata):
635 allowed = assembleBlock(data, txlist)[80:]
636 if allowed != share['blkdata']:
637 raise RejectedShare('bad-txns')
638 checkShare.logger = logging.getLogger('checkShare')
# Fragment of logShare(share) (def at missing line 640): normalize the
# solution hex and fan the share out to every configured share logger.
641 if '_origdata' in share:
642 share['solution'] = share['_origdata']
# else-branch: derive the solution hex from the byte-swapped header data.
644 share['solution'] = b2a_hex(swap32(share['data'])).decode('utf8')
645 for i in loggersShare:
# Entry point for all submitted shares: run checkShare, record a reject
# reason on failure, and log the share unless its upstream submission is
# still pending (try header and logShare call are at missing lines).
648 def receiveShare(share):
649 # TODO: username => userid
652 except RejectedShare as rej:
653 share['rejectReason'] = str(rej)
# Unexpected errors become a generic 'ERROR' reject rather than crashing.
655 except BaseException as e:
656 share['rejectReason'] = 'ERROR'
# Shares awaiting upstream result are logged later (DelayLogForUpstream).
659 if not share.get('upstreamRejectReason', None) is PendingUpstream:
# React to an external new-block notification by refreshing the merkle
# tree (lines 666-667 following this are missing from this chunk).
662 def newBlockNotification():
663 logging.getLogger('newBlockNotification').info('Received new block notification')
664 MM.updateMerkleTree()
665 # TODO: Force RESPOND TO LONGPOLLS?
# SIGUSR1 triggers a new-block check on a fresh thread, since signal
# handlers may run while locks are held.
# NOTE(review): 'thr.start()' (line 671) is missing from this chunk.
668 def newBlockNotificationSIGNAL(signum, frame):
669 # Use a new thread, in case the signal handler is called with locks held
670 thr = threading.Thread(target=newBlockNotification, name='newBlockNotification via signal %s' % (signum,))
674 from signal import signal, SIGUSR1
675 signal(SIGUSR1, newBlockNotificationSIGNAL)
683 from time import sleep
# Default path for the pickled work-state save file (see saveState).
686 if getattr(config, 'SaveStateFilename', None) is None:
687 config.SaveStateFilename = 'eloipool.worklog'
# Fragment of stopServers() (def at missing line 689): idempotently shut
# down the bcnode/JSON-RPC/stratum servers with a bounded wait, then close
# leftover fds and stop share loggers. Loop headers, stop calls, and the
# wait loop are at missing lines.
690 logger = logging.getLogger('stopServers')
# Guard against double shutdown (function attribute used as a flag).
692 if hasattr(stopServers, 'already'):
693 logger.debug('Already tried to stop servers before')
695 stopServers.already = True
697 logger.info('Stopping servers...')
698 global bcnode, server
699 servers = (bcnode, server, stratumsrv)
706 logger.error('Failed to stop server %s\n%s' % (s, traceback.format_exc()))
# sl accumulates the names of servers that refuse to stop in time.
712 sl.append(s.__class__.__name__)
717 logger.error('Servers taking too long to stop (%s), giving up' % (', '.join(sl)))
# Force-close any file descriptors a stubborn server still tracks.
722 for fd in s._fd.keys():
726 for i in loggersShare:
727 if hasattr(i, 'stop'):
# Pickle DupeShareHACK and workLog to SAVE_STATE_FILENAME for resume; on
# failure, try to unlink the partial file so a later restore isn't fed
# corrupt data (try/except headers and the timestamp dump are at missing
# lines; t is presumably the shutdown timestamp pickled first -- confirm).
730 def saveState(SAVE_STATE_FILENAME, t = None):
731 logger = logging.getLogger('saveState')
733 # Then, save data needed to resume work
734 logger.info('Saving work state to \'%s\'...' % (SAVE_STATE_FILENAME,))
738 with open(SAVE_STATE_FILENAME, 'wb') as f:
740 pickle.dump(DupeShareHACK, f)
741 pickle.dump(workLog, f)
746 logger.error('Failed to save work\n' + traceback.format_exc())
748 os.unlink(SAVE_STATE_FILENAME)
750 logger.error(('Failed to unlink \'%s\'; resume may have trouble\n' % (SAVE_STATE_FILENAME,)) + traceback.format_exc())
# Fragments of exit() and restart() (def lines missing): both save state,
# then either terminate (SIGTERM to self) or re-exec the current argv.
756 saveState(config.SaveStateFilename, t=t)
757 logging.getLogger('exit').info('Goodbye...')
758 os.kill(os.getpid(), signal.SIGTERM)
765 saveState(config.SaveStateFilename, t=t)
766 logging.getLogger('restart').info('Restarting...')
# NOTE(review): os.execv requires argv[0] to be an executable path.
768 os.execv(sys.argv[0], sys.argv)
770 logging.getLogger('restart').error('Failed to exec\n' + traceback.format_exc())
# Restore DupeShareHACK/workLog from the pickled save file, handling the
# 2012-era legacy layouts; the workLog is skipped when the save is more
# than 120 seconds old (work would have expired anyway). Format-detection
# branches and the try header are at missing lines.
# SECURITY: pickle.load is only acceptable because the file is written
# locally by saveState -- never point this at untrusted data.
772 def restoreState(SAVE_STATE_FILENAME):
773 if not os.path.exists(SAVE_STATE_FILENAME):
776 global workLog, DupeShareHACK
778 logger = logging.getLogger('restoreState')
779 s = os.stat(SAVE_STATE_FILENAME)
780 logger.info('Restoring saved state from \'%s\' (%d bytes)' % (SAVE_STATE_FILENAME, s.st_size))
782 with open(SAVE_STATE_FILENAME, 'rb') as f:
786 # Future formats, not supported here
790 # Old format, from 2012-02-02 to 2012-02-03
795 if isinstance(t, dict):
796 # Old format, from 2012-02-03 to 2012-02-03
800 # Current format, from 2012-02-03 onward
801 DupeShareHACK = pickle.load(f)
# t is the saved timestamp; stale work logs are not worth restoring.
803 if t + 120 >= time():
804 workLog = pickle.load(f)
806 logger.debug('Skipping restore of expired workLog')
808 logger.error('Failed to restore state\n' + traceback.format_exc())
810 logger.info('State restored successfully')
812 logger.info('Total downtime: %g seconds' % (time() - t,))
815 from jsonrpcserver import JSONRPCListener, JSONRPCServer
816 import interactivemode
817 from networkserver import NetworkListener
820 from stratumserver import StratumServer
# -- Main entry point --
# Backward-compat config migration (DbOptions -> ShareLogging, old-style
# ShareLogging entries, JSONRPCAddress -> JSONRPCAddresses), share-logger
# plugin loading, listener setup for bitcoin-P2P / JSON-RPC / stratum,
# saved-state restore, and worker-thread startup. Many surrounding lines
# (loop/try headers, dict closers, thr.start() calls) are missing from this
# chunk; code lines kept byte-identical.
823 if __name__ == "__main__":
824 if not hasattr(config, 'ShareLogging'):
825 config.ShareLogging = ()
# Legacy DbOptions becomes an implicit postgres ShareLogging entry.
826 if hasattr(config, 'DbOptions'):
827 logging.getLogger('backwardCompatibility').warn('DbOptions configuration variable is deprecated; upgrade to ShareLogging var before 2013-03-05')
828 config.ShareLogging = list(config.ShareLogging)
829 config.ShareLogging.append( {
831 'engine': 'postgres',
832 'dbopts': config.DbOptions,
833 'statement': "insert into shares (rem_host, username, our_result, upstream_result, reason, solution) values ({Q(remoteHost)}, {username}, {YN(not(rejectReason))}, {YN(upstreamResult)}, {rejectReason}, decode({solution}, 'hex'))",
# Normalize old (name, parameters) ShareLogging entries into dicts.
835 for i in config.ShareLogging:
836 if not hasattr(i, 'keys'):
838 logging.getLogger('backwardCompatibility').warn('Using short-term backward compatibility for ShareLogging[\'%s\']; be sure to update config before 2012-04-04' % (name,))
839 if name == 'postgres':
842 'engine': 'postgres',
843 'dbopts': parameters,
845 elif name == 'logfile':
847 i['thropts'] = parameters
# 'filename' is hoisted out of the thread options for logfile loggers.
848 if 'filename' in parameters:
849 i['filename'] = parameters['filename']
850 i['thropts'] = dict(i['thropts'])
851 del i['thropts']['filename']
# Load each sharelogging plugin module and instantiate its same-named class.
# NOTE(review): the imp module is deprecated (removed in Python 3.12);
# importlib is the modern replacement -- confirm before upgrading.
859 fp, pathname, description = imp.find_module(name, sharelogging.__path__)
860 m = imp.load_module(name, fp, pathname, description)
861 lo = getattr(m, name)(**parameters)
862 loggersShare.append(lo)
864 logging.getLogger('sharelogging').error("Error setting up share logger %s: %s", name, sys.exc_info())
# Bitcoin P2P listeners and the optional upstream node link.
867 if not hasattr(config, 'BitcoinNodeAddresses'):
868 config.BitcoinNodeAddresses = ()
869 for a in config.BitcoinNodeAddresses:
870 LSbc.append(NetworkListener(bcnode, a))
872 if hasattr(config, 'UpstreamBitcoindNode') and config.UpstreamBitcoindNode:
873 BitcoinLink(bcnode, dest=config.UpstreamBitcoindNode)
# Importing these modules registers their JSON-RPC methods (side effects).
875 import jsonrpc_getblocktemplate
876 import jsonrpc_getwork
877 import jsonrpc_setworkaux
# JSON-RPC (getwork/GBT) server wiring.
879 server = JSONRPCServer()
880 server.tls = threading.local()
881 server.tls.wantClear = False
882 if hasattr(config, 'JSONRPCAddress'):
883 logging.getLogger('backwardCompatibility').warn('JSONRPCAddress configuration variable is deprecated; upgrade to JSONRPCAddresses list before 2013-03-05')
884 if not hasattr(config, 'JSONRPCAddresses'):
885 config.JSONRPCAddresses = []
886 config.JSONRPCAddresses.insert(0, config.JSONRPCAddress)
888 for a in config.JSONRPCAddresses:
889 LS.append(JSONRPCListener(server, a))
890 if hasattr(config, 'SecretUser'):
891 server.SecretUser = config.SecretUser
892 server.aux = MM.CoinbaseAux
893 server.getBlockHeader = getBlockHeader
894 server.getBlockTemplate = getBlockTemplate
895 server.receiveShare = receiveShare
896 server.RaiseRedFlags = RaiseRedFlags
897 server.ShareTarget = config.ShareTarget
899 if hasattr(config, 'TrustedForwarders'):
900 server.TrustedForwarders = config.TrustedForwarders
901 server.ServerName = config.ServerName
# Stratum server wiring.
903 stratumsrv = StratumServer()
904 stratumsrv.getStratumJob = getStratumJob
905 stratumsrv.getExistingStratumJob = getExistingStratumJob
906 stratumsrv.receiveShare = receiveShare
907 stratumsrv.getTarget = getTarget
908 stratumsrv.defaultTarget = config.ShareTarget
909 stratumsrv.IsJobValid = IsJobValid
910 if not hasattr(config, 'StratumAddresses'):
911 config.StratumAddresses = ()
912 for a in config.StratumAddresses:
913 NetworkListener(stratumsrv, a)
# Resume saved state, start daemon worker threads (start() calls are at
# missing lines), then run the JSON-RPC server on the main thread.
917 restoreState(config.SaveStateFilename)
919 prune_thr = threading.Thread(target=WorkLogPruner, args=(workLog,))
920 prune_thr.daemon = True
923 bcnode_thr = threading.Thread(target=bcnode.serve_forever)
924 bcnode_thr.daemon = True
927 stratum_thr = threading.Thread(target=stratumsrv.serve_forever)
928 stratum_thr.daemon = True
931 server.serve_forever()