Skip to content

Commit

Permalink
mdf4reader: improved concatenation performance of DLBlocks by using b…
Browse files Browse the repository at this point in the history
…ytearray() function (divides total time by 2 on test files)
  • Loading branch information
aymeric.rateau@gmail.com committed Feb 10, 2015
1 parent 55ccfb4 commit fe92cfc
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions mdf4reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -348,30 +348,30 @@ def load(self, record, zip=None, nameList=None, sortedFlag=True): # reads sorted
temps['dl_offset']=temps.mdfblockread(self.fid, UINT64, temps['dl_count'])
if temps['dl_dl_next']:
index=1
while temps['dl_dl_next']:
while temps['dl_dl_next']: # reads pointers to all data blocks (DT, RD, SD, DZ)
temp=MDFBlock()
temp.loadHeader(self.fid, temps['dl_dl_next'])
temps['dl_dl_next']=temp.mdfblockread(self.fid, LINK, 1)
temps['dl_data'][index]=[temp.mdfblockread(self.fid, LINK, 1) for Link in range(temp['link_count']-1)]
index+=1
if temps['dl_count']:
# read and concatenate raw blocks
buf=b''
buf=bytearray()
for DL in temps['dl_data']:
for pointer in temps['dl_data'][DL]:
# read first data blocks linked by DLBlock to identify data block type
data_block=MDFBlock()
data_block.loadHeader(self.fid, pointer)
if data_block['id'] in ('##DT', '##RD', b'##DT', b'##RD', '##SD', b'##SD'):
buf+=self.fid.read(data_block['length']-24)
buf.extend(self.fid.read(data_block['length']-24))
elif data_block['id'] in ('##DZ', b'##DZ'):
data_block['dz_org_block_type']=data_block.mdfblockreadCHAR(self.fid, 2)
data_block['dz_zip_type']=data_block.mdfblockread(self.fid, UINT8, 1)
data_block['dz_reserved']=data_block.mdfblockreadBYTE(self.fid, 1)
data_block['dz_zip_parameter']=data_block.mdfblockread(self.fid, UINT32, 1)
data_block['dz_org_data_length']=data_block.mdfblockread(self.fid, UINT64, 1)
data_block['dz_data_length']=data_block.mdfblockread(self.fid, UINT64, 1)
buf+=self.fid.read( data_block['dz_data_length'] )
buf.extend(self.fid.read( data_block['dz_data_length'] ))
data_block['data']=buf
temps['data']=DATABlock(record, parent_block=data_block, channelList=nameList, sortedFlag=sortedFlag)
else: # empty datalist
Expand Down Expand Up @@ -1027,8 +1027,8 @@ def convertAllChannel4(self):
if ncpu<1:
ncpu=1
pool = Pool(processes=ncpu)
args = [self[channelName] for channelName in self]
result = pool.map_async(convertChannelData4,args)
args = [(self[channelName]['data'], self.convert_tables) for channelName in self]
result = pool.apply_async(convertChannelData4,args)
result.get()
index=0
for channelName in self:
Expand Down

0 comments on commit fe92cfc

Please sign in to comment.