1 """This module implements IO-related classes, functions and procedures.
2
3 Classes:
4 * TemporaryFile : creates a temporary file storing the evolution of an analysis.
5 * EndOfFile : an empty dummy class used by |DCDReader|.
6 * FortranBinaryFile : sets up a binary file reader.
7 * DCDFile : sets up a DCD file reader.
8 * AmberNetCDFConverter : converts a trajectory from Amber > 9 to a MMTK NetCDF trajectory.
9 * CHARMMConverter : converts a trajectory from CHARMM to a MMTK NetCDF trajectory.
10 * DL_POLYConverter : converts a trajectory from DL_POLY > 9 to a MMTK NetCDF trajectory.
11 * MaterialsStudioConverter : converts a trajectory from MaterialsStudio > 9 to a MMTK NetCDF trajectory.
12 * NAMDConverter : converts a trajectory from NAMD to a MMTK NetCDF trajectory.
13 * VASPConverter : converts a trajectory from VASP > 9 to a MMTK NetCDF trajectory.
14
15 Procedures:
16 * convertNetCDFToASCII: converts a NetCDF file into an ASCII file.
17 * convertASCIIToNetCDF: converts an ASCII file into a NetCDF file.
18 """
19
20
21 from copy import copy
22 import os
23 import re
24 import struct
25 from struct import calcsize
26 import subprocess
27 import sys
28 from tempfile import mktemp
29 from time import asctime, ctime, localtime, strftime, time
30 import xml.dom.minidom
31
32
33 from Scientific import N
34 from Scientific.IO.FortranFormat import FortranFormat, FortranLine
35 from Scientific.Geometry import Vector
36 from Scientific.IO.NetCDF import _NetCDFFile, NetCDFFile
37 from Scientific.IO.TextFile import TextFile
38
39
40 from MMTK import Atom, AtomCluster
41 from MMTK import Units
42 from MMTK.ParticleProperties import Configuration, ParticleVector
43 from MMTK.PDB import PDBConfiguration
44 from MMTK.Trajectory import Trajectory, SnapshotGenerator, TrajectoryOutput
45 from MMTK.Universe import InfiniteUniverse, OrthorhombicPeriodicUniverse, ParallelepipedicPeriodicUniverse
46
47
48 from nMOLDYN.Core.Error import Error
49 from nMOLDYN.Core.Mathematics import basisVectors
50 from nMOLDYN.Core.Logger import LogMessage
51 from nMOLDYN.Core.Preferences import PREFERENCES
52
54 """Creates a temporary file used to monitor (progress, start, end ...) an analysis.
55 """
56
def __init__(self, module = '', statusBar = None):
    """The constructor.

    @param module: the name of the analysis the temporary file will be attached to.
    @type module: string.

    @param statusBar: if not None, an instance of nMOLDYN.GUI.Widgets.StatusBar. The status bar, attached to the
    analysis, to update.
    @type statusBar: instance of nMOLDYN.GUI.Widgets.StatusBar
    """

    self.module = module
    self.statusBar = statusBar

    # Resolve the user name portably: win32api on Windows, pwd elsewhere.
    try:
        import win32api
        owner = win32api.GetUserName()

    except ImportError:
        from pwd import getpwuid
        from os import getuid
        owner = getpwuid(getuid())[0]

    creationTime = ctime(time())

    pid = os.getpid()

    # The suffix encodes owner, pid and module name so that concurrent
    # analyses never collide on the same monitoring file.
    suffix = '.'.join(['',owner, str(pid), self.module, 'moldyn'])

    # Prefer the race-free mkstemp; fall back to mktemp on old Pythons
    # where tempfile.mkstemp is not available.
    try:
        from tempfile import mkstemp

        handle, self.filename = mkstemp(suffix, text=True)
        self.file = os.fdopen(handle, 'w')

    except ImportError:

        self.filename = mktemp(suffix)

        self.file = TextFile(self.filename, 'w')

    # Fixed header read back by the monitoring code: pid, creation time,
    # owner, module name and the initial progress value (0 %).
    self.file.write(str(pid) + '\n')

    self.file.write(creationTime + '\n')

    self.file.write(str(owner) + '\n')

    self.file.write(str(self.module) + '\n')

    self.file.write('0\n')

    self.file.flush()

    # Number of completed outer-loop steps, incremented by update().
    self.counter = 0

    # Progress display granularity (in %) from the user preferences;
    # fall back to 10 % on unparsable or out-of-range values.
    try:
        self.displayProgressRate = int(PREFERENCES.progress_rate)
    except ValueError:
        self.displayProgressRate = 10

    if (self.displayProgressRate <= 1) or (self.displayProgressRate >= 100):
        self.displayProgressRate = 10
126
128 """Updates the temporary file by writing the percentage of the job that has been done.
129
130 @param norm: the number of steps of the outer loop of the analysis to monitor.
131 @type norm: integer.
132 """
133
134
135 t = int(100*self.counter/norm)
136 oldProgress = (t/self.displayProgressRate)*self.displayProgressRate
137
138 self.counter += 1
139
140 t = int(100*self.counter/norm)
141 newProgress = (t/self.displayProgressRate)*self.displayProgressRate
142
143 if newProgress != oldProgress:
144 LogMessage('info','Progress rate = %3d' % newProgress,['file','console'])
145
146 if self.statusBar is not None:
147 self.statusBar.setValue(t)
148
149
150
151 self.file.write(str(t)+"\n")
152
153 self.file.flush()
154
156 """Closes and removes the temporary file.
157 """
158
159 self.file.close()
160 os.unlink(self.filename)
161
164
166 """Sets up a Fortran binary file reader.
167
168 Comments:
169 -written by Konrad Hinsen in the scope of a DCD file reader.
170 """
def __init__(self, filename, byte_order = '='):
    """The constructor.

    @param filename: the name of the Fortran binary file to read.
    @type filename: string.

    @param byte_order: a struct byte-order character ('=', '<' or '>').
    @type byte_order: string.
    """
    # open() rather than the file() builtin: identical in Python 2, and
    # file() no longer exists in Python 3.
    self.file = open(filename, 'rb')
    self.byte_order = byte_order
174
177
def next(self):
    """Returns the payload of the next Fortran record.

    A Fortran sequential record is framed by its byte length, written both
    before and after the payload; the two markers must agree.

    @raise StopIteration: at the regular end of the file.
    """
    head = self.file.read(4)
    if not head:
        # Regular end of file: no record marker left.
        raise StopIteration
    reclen = struct.unpack(self.byte_order + 'i', head)[0]
    payload = self.file.read(reclen)
    tail = struct.unpack(self.byte_order + 'i', self.file.read(4))[0]
    assert reclen == tail
    return payload
187
def skipRecord(self):
    """Skips the next Fortran record without unpacking its payload."""
    head = self.file.read(4)
    reclen = struct.unpack(self.byte_order + 'i', head)[0]
    # Jump over the payload (seek relative to the current position).
    self.file.seek(reclen, 1)
    tail = struct.unpack(self.byte_order + 'i', self.file.read(4))[0]
    assert reclen == tail
194
def getRecord(self, format, repeat = False):
    """Reads the next record and unpacks it according to a struct format.

    @param format: a struct format string describing one unit of data.
    @type format: string.

    @param repeat: if true, the format is replicated to cover the whole
        record, whose length must then be a multiple of the unit size.
    @type repeat: boolean.

    @raise EndOfFile: when no record is left in the file.
    """
    try:
        data = self.next()
    except StopIteration:
        raise EndOfFile()
    if repeat:
        unit = struct.calcsize(self.byte_order + format)
        assert len(data) % unit == 0
        # Floor division: the replication count must stay an integer under
        # Python 3's true division as well.
        format = (len(data)//unit) * format
    # The original wrapped this call in a no-op 'try: ... except: raise';
    # the unpack error propagates unchanged either way.
    return struct.unpack(self.byte_order + format, data)
208
210 """Sets up a DCD file reader.
211 """
212
214 """The constructor.
215
216 @param dcd_filename: the name of the DCD file to read.
217 @type dcd_filename: string.
218 """
219
220 self.byte_order = None
221 data = file(dcd_filename, 'rb').read(4)
222 for byte_order in ['<', '>']:
223 reclen = struct.unpack(byte_order + 'i', data)[0]
224 if reclen == 84:
225 self.byte_order = byte_order
226 break
227 if self.byte_order is None:
228 raise IOError("%s is not a DCD file" % dcd_filename)
229
230 self.binary = FortranBinaryFile(dcd_filename, self.byte_order)
231
232 header_data = self.binary.next()
233 if header_data[:4] != 'CORD':
234 raise IOError("%s is not a DCD file" % dcd_filename)
235 self.header = struct.unpack(self.byte_order + '9id9i', header_data[4:])
236 self.nset = self.header[0]
237 self.istart = self.header[1]
238 self.nsavc = self.header[2]
239 self.namnf = self.header[8]
240 self.charmm_version = self.header[-1]
241 self.has_pbc_data = False
242 self.has_4d = False
243 if self.charmm_version != 0:
244 self.header = struct.unpack(self.byte_order + '9if10i',
245 header_data[4:])
246 if self.header[10] != 0:
247 self.has_pbc_data = True
248 if self.header[11] != 0:
249 self.has_4d = True
250 self.delta = self.header[9]*Units.akma_time
251
252 title_data = self.binary.next()
253 nlines = struct.unpack(self.byte_order + 'i', title_data[:4])[0]
254 assert len(title_data) == 80*nlines+4
255 title_data = title_data[4:]
256 title = []
257 for i in range(nlines):
258 title.append(title_data[:80].rstrip())
259 title_data = title_data[80:]
260 self.title = '\n'.join(title)
261
262 self.natoms = self.binary.getRecord('i')[0]
263
264 if self.namnf > 0:
265 raise Error('NAMD converter can not handle fixed atoms yet')
266
268 """Reads a frame of the DCD file.
269 """
270 if self.has_pbc_data:
271 unit_cell = N.array(self.binary.getRecord('6d'), N.Float)
272 a, gamma, b, beta, alpha, c = unit_cell
273 if -1. < alpha < 1. and -1. < beta < 1. and -1. < gamma < 1.:
274
275
276 alpha = 0.5*N.pi-N.arcsin(alpha)
277 beta = 0.5*N.pi-N.arcsin(beta)
278 gamma = 0.5*N.pi-N.arcsin(gamma)
279 else:
280
281 alpha *= Units.deg
282 beta *= Units.deg
283 gamma *= Units.deg
284 unit_cell = (a*Units.Ang, b*Units.Ang, c*Units.Ang,
285 alpha, beta, gamma)
286 else:
287 unit_cell = None
288 format = '%df' % self.natoms
289 x = N.array(self.binary.getRecord(format), N.Float32)*Units.Ang
290 y = N.array(self.binary.getRecord(format), N.Float32)*Units.Ang
291 z = N.array(self.binary.getRecord(format), N.Float32)*Units.Ang
292 if self.has_4d:
293 self.binary.skipRecord()
294 return unit_cell, x, y, z
295
297 """Skips a frame of the DCD file.
298 """
299 nrecords = 3
300 if self.has_pbc_data:
301 nrecords += 1
302 if self.has_4d:
303 nrecords += 1
304 for i in range(nrecords):
305 self.binary.skipRecord()
306
309
315
def convertNetCDFToASCII(inputFile, outputFile, variables, floatPrecision = 9, doublePrecision = 17):
    """Converts a file in NetCDF format to a file in ASCII/CDL format using the ncdump program provided with
    the netcdf library.

    @param inputFile: the name of the NetCDF input file.
    @type inputFile: string

    @param outputFile: the name of the CDL output file.
    @type outputFile: string

    @param variables: list of the NetCDF variables names (string) to extract from the NetCDF file for conversion.
    @type variables: list

    @param floatPrecision: the precision on the float numbers.
    @type floatPrecision: integer

    @param doublePrecision: the precision on the double numbers.
    @type doublePrecision: integer

    @raise Error: if a precondition fails or if ncdump itself fails.
    """

    if not os.path.exists(inputFile):
        raise Error('The NetCDF input file name does not exist.')

    if not os.path.exists(PREFERENCES.ncdump_path):
        raise Error('The path for ncdump program is not correctly set.')

    if not isinstance(variables,(list,tuple)):
        raise Error('No NetCDF variables list given for conversion.')

    if not variables:
        raise Error('Empty NetCDF variables list given for conversion.')

    variablesList = ','.join(variables)

    if not outputFile:
        raise Error('No ASCII output file name.')

    try:
        output = open(outputFile, 'w')
        try:
            # -b c: annotate the CDL data section; -v: restrict to the
            # requested variables; -p: float/double output precisions.
            status = subprocess.call([PREFERENCES.ncdump_path, '-b', 'c', '-v',
                                      variablesList, '-p', '%d,%d' % (floatPrecision,doublePrecision),
                                      inputFile], stdout = output)
        finally:
            # The original leaked the handle when subprocess.call raised;
            # always close the output file.
            output.close()

        if status != 0:
            # Explicit Error instead of the original bare 'raise' (which,
            # with no active exception, raised a spurious runtime error
            # only to be converted below anyway).
            raise Error('Conversion failed.')

    except Error:
        raise
    except:
        raise Error('Conversion failed.')
366
368 """Converts a file in ASCII format to a file in NetCDF format using the ncgen program provided with
369 the netcdf library.
370
371 @param inputFile: the name of the NetCDF input file.
372 @type inputFile: string
373
374 @param outputFile: the name of the CDL output file.
375 @type outputFile: string
376 """
377
378 if not os.path.exists(PREFERENCES.ncgen_path):
379 return
380
381 if inputFile[-4:] == '.cdl':
382 try:
383 subprocess.call([PREFERENCES.ncgen_path, '-x', '-o', outputFile, inputFile])
384 except:
385 return
386
387 else:
388 baseName = os.path.splitext(os.path.basename(inputFile))[0]
389
390 try:
391 ascii = open(inputFile, 'r')
392
393 except IOError:
394 raise Error('Can not open for reading %s ASCII file.' % inputFile)
395
396 else:
397 asciiContents = ascii.readlines()
398 ascii.close()
399
400
401 asciiContents = [d.strip() for d in asciiContents if d.strip() != '']
402
403 globalAttributes = {}
404 variables = []
405
406 globalAttributes['comment'] = ''
407
408 numLines = []
409
410 for line in asciiContents:
411
412 if line[0] == '#':
413
414 g = re.findall('#\s*global(.*=.*)',line.lower())
415 if g:
416 try:
417 gName, gVar = g[0].split('=')
418 globalAttributes[gName.strip()] = gVar.strip()
419 continue
420
421 except:
422 raise Error('The line %s can not be parsed.' % line)
423
424 v = re.findall('#\s*variable(.*)',line.lower())
425 if v:
426 try:
427 temp = [v.split('=') for v in v[0].strip().split(';') if v]
428 temp = [[r[col].strip() for r in temp] for col in range(len(temp[0]))]
429 d = dict(zip(temp[0],temp[1]))
430
431 if not d.has_key('name'):
432 raise
433
434 variables.append(d)
435 continue
436
437 except:
438 raise Error('The line %s can not be parsed.' % line)
439
440 globalAttributes['comment'] += line[1:] + '\n'
441
442
443 else:
444 numLines.append([v for v in line.split()])
445
446 data = [[r[col].strip() for r in numLines] for col in range(len(numLines[0]))]
447
448 if len(variables) != len(data):
449 variables = [{'name' : 'column'+str(i)} for i in range(1, len(data)+1)]
450
451
452 try:
453 import win32api
454 owner = win32api.GetUserName()
455
456 except ImportError:
457 from pwd import getpwuid
458 from os import getuid
459 owner = getpwuid(getuid())[0]
460
461
462 pid = os.getpid()
463
464 suffix = '.'.join(['',owner, str(pid), 'moldyn'])
465
466 cdlFile = mktemp(suffix)
467
468 file = open(cdlFile, 'w')
469
470 file.write('netcdf %s {\n' % baseName)
471
472 file.write('dimensions:\n')
473 file.write('\tnvalues = %d ;\n' % len(data[0]))
474
475 file.write('\nvariables:\n')
476 for var in variables:
477 file.write('\tdouble %s(nvalues) ;\n' % var['name'])
478 for k, v in var.items():
479 if k == 'name':
480 continue
481 file.write('\t%s:%s = "%s";\n' % (var['name'],k,v))
482
483 file.write('\n// global attributes:\n')
484 for k, v in globalAttributes.items():
485 file.write('\t:%s = "%s";\n' % (k,v))
486
487 file.write('\ndata:\n\n')
488
489 for i in range(len(variables)):
490 file.write(' %s =\n ' % variables[i]['name'])
491 toFlush = ' '
492 for val in data[i]:
493 if len(toFlush + val + ', ') > 80:
494 file.write(toFlush.strip() + '\n')
495 toFlush = ' ' + val + ', '
496 else:
497 toFlush += val + ', '
498
499 toFlush = toFlush.strip()
500 if toFlush[-1] == ',':
501 toFlush = toFlush[:-1]
502
503 file.write(toFlush + ' ;\n\n')
504
505 file.write('}')
506
507 file.close()
508
509 try:
510 subprocess.call([PREFERENCES.ncgen_path, '-x', '-o', outputFile, cdlFile])
511
512 except:
513 return
514
515 finally:
516 os.unlink(cdlFile)
517
519 """Converts an Amber NetCDF Trajectory into a MMTK NetCDFFile.
520
521 Comments:
522 - this code is an improved version of the original converter written by Paolo Calligari.
523 """
524
def __init__(self, pdbFile, amberNetCDFFile, outputFile, timeStep = 1.0):
    """
    The constructor. Will do the conversion.

    @param pdbFile: the Amber PDB file name of a frame of the trajectory to convert.
    @type pdbFile: string

    @param amberNetCDFFile: the Amber NetCDF file name of the trajectory to convert.
    @type amberNetCDFFile: string

    @param outputFile: the name of MMTK NetCDF trajectory output file.
    @type outputFile: string

    @param timeStep: the timestep that will used when building the MMTK trajectory. Default to 1 ps.
    @type timeStep: float.
    """

    self.pdbFile = pdbFile
    self.amberNetCDFFile = amberNetCDFFile
    self.outputFile = outputFile
    self.timeStep = timeStep

    # The conversion is performed entirely at construction time.
    self.__convert()
549
551 """Performs the actual conversion.
552 """
553
554
555 try:
556 netcdfInputFile = NetCDFFile(self.amberNetCDFFile, 'r')
557
558 except:
559 raise Error('The Amber NetCDF file %s could not be opened for reading.' % self.amberNetCDFFile)
560
561 else:
562
563 nSteps = len(netcdfInputFile.variables['time'])
564
565
566 if netcdfInputFile.variables.has_key('cell_lengths'):
567
568 box = netcdfInputFile.variables['cell_lengths']
569
570
571 angles = netcdfInputFile.variables['cell_angles']
572
573 cellParams = list(box[0]*Units.Ang) + list(angles[0]*Units.deg)
574
575 universe = ParallelepipedicPeriodicUniverse(basisVectors(cellParams))
576
577 else:
578 universe = InfiniteUniverse()
579
580
581 try:
582
583 conf = PDBConfiguration(self.pdbFile)
584
585 except:
586 raise Error('The PDB file %s could not be opened for reading.' % self.pdbFile)
587
588 else:
589 molecules = conf.createAll()
590 universe.addObject(molecules)
591
592 trajectory = Trajectory(universe, self.outputFile, 'w', 'Converted from AMBER NetCDF file.')
593 snapshot = SnapshotGenerator(universe, actions=[TrajectoryOutput(trajectory, ['all'], 0, None, 1)])
594
595 num = molecules.numberOfAtoms()
596
597 for i in range(nSteps):
598
599 t = float(i+1)*self.timeStep
600
601
602 if universe.is_periodic:
603 cellParams = list(box[i]*Units.Ang) + list(angles[i]*Units.deg)
604 b = basisVectors(cellParams)
605 cell = N.zeros((9,), typecode = N.Float)
606 cell[0:3] = b[0]
607 cell[3:6] = b[1]
608 cell[6:9] = b[2]
609
610 else:
611 cell = None
612
613
614 coordinates = netcdfInputFile.variables['coordinates'][i][:num]*Units.Ang
615
616
617 universe.setConfiguration(Configuration(universe, coordinates, cell))
618
619
620 snapshot(data = {'time': t})
621
622 trajectory.close()
623 netcdfInputFile.close()
624
626 """Converts a CHARMM Trajectory into a MMTK NetCDFFile.
627
628 Comments:
629 - this code is based on the original converter written by Konrad Hinsen.
630 """
631
def __init__(self, pdbFile, dcdFile, outputFile):
    """
    The constructor. Will do the conversion.

    @param pdbFile: the CHARMM PDB file name of a frame of the trajectory to convert.
    @type pdbFile: string

    @param dcdFile: the CHARMM DCD file name of the trajectory to convert.
    @type dcdFile: string

    @param outputFile: the name of MMTK NetCDF trajectory output file.
    @type outputFile: string
    """

    self.pdbFile = pdbFile
    self.dcdFile = dcdFile
    self.outputFile = outputFile

    # The conversion is performed entirely at construction time.
    self.__convert()
652
654 """Performs the actual conversion.
655 """
656
657
658 dcd = DCDFile(self.dcdFile)
659 step = dcd.istart
660 step_increment = dcd.nsavc
661 dt = dcd.delta
662 unit_cell, x, y, z = dcd.readStep()
663
664
665 if not dcd.has_pbc_data:
666 universe = InfiniteUniverse()
667
668
669 else:
670 universe = ParallelepipedicPeriodicUniverse(basisVectors(unit_cell))
671
672
673
674 conf = PDBConfiguration(self.pdbFile)
675 universe.addObject(conf.createAll())
676
677
678 trajectory = Trajectory(universe, self.outputFile, mode = 'w', comment = dcd.title)
679 snapshot = SnapshotGenerator(universe, actions=[TrajectoryOutput(trajectory, ["all"], 0, None, 1)])
680
681
682 conf_array = universe.configuration().array
683 comp = 0
684 while True:
685 conf_array[:, 0] = x
686 conf_array[:, 1] = y
687 conf_array[:, 2] = z
688
689 if universe.is_periodic:
690 universe.setShape(basisVectors(unit_cell))
691
692 step_data = {'time': step*dt, 'step': step}
693 snapshot(data = step_data)
694 step += step_increment
695
696 try:
697 unit_cell, x, y, z = dcd.readStep()
698 comp += 1
699
700 except EndOfFile:
701 break
702
703
704 trajectory.close()
705
707 """Converts a DL_POLY Trajectory into a MMTK NetCDFFile.
708 """
709
def __init__(self, fieldFile, historyFile, outputFile, specialAtoms = None):
    """
    The constructor. Will do the conversion.

    @param fieldFile: the DL_POLY FIELD file name of the trajectory to convert.
    @type fieldFile: string

    @param historyFile: the DL_POLY HISTORY file name of the trajectory to convert.
    @type historyFile: string

    @param outputFile: the name of MMTK NetCDF trajectory output file.
    @type outputFile: string

    @param specialAtoms: dictionary of the form {s1 : e1, s2 : e2 ...} where 's1', 's2' ... and 'e1', 'e2' ...
    are respectively the DL_POLY name and the symbol of atoms 1, 2 ... May be None (no special atoms).
    @type specialAtoms: dict
    """

    self.fieldFile = fieldFile
    self.historyFile = historyFile
    self.outputFile = outputFile

    # None replaces the original mutable {} default argument; the copy
    # guarantees the caller's dictionary is never mutated.
    if specialAtoms is None:
        specialAtoms = {}
    self.specialAtoms = copy(specialAtoms)

    # Normalize every name/symbol pair to lower case. Iterate over a
    # snapshot of the items because keys may be deleted during the loop
    # (required under Python 3, harmless under Python 2).
    for k, v in list(self.specialAtoms.items()):
        if k.lower() != k:
            self.specialAtoms[k.lower()] = v.lower()
            del self.specialAtoms[k]
        else:
            self.specialAtoms[k] = v.lower()

    # The conversion is performed entirely at construction time.
    self.__convert()
744
746 """Reads the DL_POLY FIELD file that contains informations about the system.
747 """
748
749 nspecies = 0
750
751
752 fieldFile = open(self.fieldFile, 'r')
753 lines = fieldFile.readlines()
754 fieldFile.close()
755
756 while 1:
757
758 if re.match('MOLECULES', lines[0].upper()):
759
760 nspecies = int(lines[0].split()[1])
761 break
762
763 if re.match('MOLECULAR TYPES', lines[0].upper()):
764
765 nspecies = int(lines[0].split()[2])
766 break
767 lines = lines[1:]
768 lines = lines[1:]
769
770 if nspecies == 0:
771 raise Error('No molecular type was found in your DL_POLY/FIELD file.')
772
773
774 species = []
775 for i in range(nspecies):
776
777 name = lines[0].strip()
778
779 n = int(lines[1].split()[1])
780
781
782 natoms = int(lines[2].split()[1])
783
784 lines = lines[3:]
785 atoms = []
786 while natoms > 0:
787
788
789
790 data = lines[0].split()
791
792 if len(data) < 3:
793 raise Error('The atomic record must contain at least three element.')
794
795 lines = lines[1:]
796 atom_name = data[0].strip().lower()
797
798 element = atom_name
799
800 if element in self.specialAtoms.keys():
801 element = self.specialAtoms[element]
802
803 else:
804
805 element = atom_name
806 while len(element) > 0:
807 try:
808 Atom(element)
809
810 except:
811 element = element[:-1]
812
813 else:
814 break
815
816 if not element:
817 raise Error('The element corresponding to atom %s could not be found in the MMTK Database.' % atom_name)
818
819 try:
820 atomweight = float(data[1])
821
822 except:
823 raise Error('The second value of the atomic record (atom weight) must be a number.')
824
825 try:
826 atomchg = float(data[2])
827
828 except:
829 raise Error('The third value of the atomic record (atom charge) must be a number.')
830
831 try:
832 nrepeat = max(int(data[3]), 1)
833
834 except:
835 nrepeat = 1
836
837 for j in range(nrepeat):
838 atoms.append((element, atom_name))
839
840 natoms = natoms - nrepeat
841
842
843 while ((not re.match('FINISH', lines[0].upper())) and (not re.match('CONSTRAINTS', lines[0].upper()))):
844 lines = lines[1:]
845
846
847
848 constraints = []
849 if re.match('CONSTRAINTS',lines[0].upper()):
850
851 nc = int(lines[0].split()[1])
852 lines = lines[1:]
853
854 while nc > 0:
855 l = lines[0].split()
856 i1 = int(l[0])-1
857 i2 = int(l[1])-1
858 d = float(l[2])*Units.Ang
859 constraints.append((i1, i2, d))
860 lines = lines[1:]
861 nc = nc - 1
862
863 while not re.match('FINISH', lines[0].upper()):
864 lines = lines[1:]
865
866 lines = lines[1:]
867 species.append([name, n, atoms, constraints])
868
869 return species
870
def __readHISTORY(self):
    """Reads the HISTORY file and write the output trajectory frame by frame.
    """

    historyFile = open(self.historyFile, 'r')

    # First line: the simulation title, reused as the trajectory comment.
    header = historyFile.readline().strip()

    # Second line: trajectory key (0 positions, 1 +velocities, 2 +forces),
    # periodic-boundary flag and atom count.
    keytrj, imcon, natms = FortranLine(historyFile.readline(), '3I10')
    if imcon >= 4:
        raise Error('Box shape not supported by MMTK')

    # imcon == 0 means no periodic boundary conditions.
    if imcon == 0:
        universe = InfiniteUniverse()

    else:
        universe = ParallelepipedicPeriodicUniverse()

    # Populate the universe from the FIELD description: isolated atoms are
    # added directly, multi-atom species become AtomClusters carrying their
    # distance constraints.
    for mol_name, mol_count, atoms, constraints in self.molecules:
        for i in range(mol_count):
            number = 0
            atom_objects = []
            for element, name in atoms:
                a = Atom(element, name = name+str(number))
                a.number = number
                number = number + 1
                atom_objects.append(a)

            if len(atom_objects) == 1:
                universe.addObject(atom_objects[0])

            else:

                ac = AtomCluster(atom_objects, name = mol_name)
                for i1, i2, d in constraints:
                    ac.addDistanceConstraint(atom_objects[i1], atom_objects[i2], d)
                universe.addObject(ac)

    # Velocities are present in the file only when keytrj > 0; initialize
    # the velocity arrays so they can be filled per frame.
    if keytrj > 0:
        universe.initializeVelocitiesToTemperature(0.)

    trajectory = Trajectory(universe, self.outputFile, mode = 'w', comment = header)

    snapshot = SnapshotGenerator(universe, actions = [TrajectoryOutput(trajectory, ["all"], 0, None, 1)])

    conf = universe.configuration()
    vel = universe.velocities()

    # Gradients (minus the forces) are stored when keytrj > 1.
    grad = ParticleVector(universe)

    # Fixed-format records of the HISTORY file.
    history_timestep_line = FortranFormat('A8,4I10,F12.6')
    history_pbc_line = FortranFormat('3G12.4')

    # One 'timestep' block per frame until the end of the file.
    while 1:

        line = historyFile.readline()

        if not line:
            break

        # 'timestep' record: step number, atom count, keys and timestep.
        data = FortranLine(line, history_timestep_line)

        nstep = data[1]

        natoms = data[2]

        keytrj = data[3]

        imcon = data[4]

        tstep = data[5]

        step_data = {'time': nstep*tstep}

        if imcon:

            # Three cell-vector records follow for periodic frames.
            data = FortranLine(historyFile.readline(), history_pbc_line)
            dirA = Vector(data)*Units.Ang

            data = FortranLine(historyFile.readline(), history_pbc_line)
            dirB = Vector(data)*Units.Ang

            data = FortranLine(historyFile.readline(), history_pbc_line)
            dirC = Vector(data)*Units.Ang

            # Flatten the three vectors into a 9-component cell array.
            cell = N.zeros((9,), typecode = N.Float)

            cell[0:3] = dirA
            cell[3:6] = dirB
            cell[6:9] = dirC

        else:
            cell = None

        for i in range(natoms):
            # Skip the per-atom header line preceding each data record.
            historyFile.readline()

            conf.array[i] = [float(v) for v in historyFile.readline().split()]

            if keytrj > 0:
                vel.array[i] = [float(v) for v in historyFile.readline().split()]

            if keytrj > 1:
                grad.array[i] = [float(v) for v in historyFile.readline().split()]

        # Convert from DL_POLY units (Angstrom, ps, amu) to MMTK units.
        universe.setConfiguration(Configuration(universe, conf.array*Units.Ang, cell))

        if keytrj > 0:
            universe.setVelocities(vel*Units.Ang/Units.ps)

        if keytrj > 1:
            # DL_POLY stores forces; MMTK stores gradients (their negative).
            N.multiply(grad.array, -Units.amu*Units.Ang/Units.ps**2, grad.array)
            step_data['gradients'] = grad

        snapshot(data = step_data)

    trajectory.close()

    historyFile.close()
1009
1011 """Performs the actual conversion.
1012 """
1013
1014
1015
1016
1017
1018 self.molecules = self.__readFIELD()
1019
1020 self.__readHISTORY()
1021
1023 """Converts a MaterialsStudio Discover or Forcite Trajectory into a MMTK NetCDFFile.
1024 """
1025
1026 atomLineFormat = FortranFormat('A5,1X,F14.9,1X,F14.9,1X,F14.9,1X,A4,1X,A7,A7,1X,A2,1X,F6.3')
1027
def __init__(self, module, xtdxsdFile, histrjFile, outputFile, subselection = None):
    """
    The constructor. Will do the conversion.

    @param module: a string being one of 'Discover' or 'Forcite' specifying which module of MaterialsStudio the
    trajectory is coming from.
    @type module: string

    @param xtdxsdFile: the MaterialsStudio XTD or XSD file name of the trajectory to convert.
    @type xtdxsdFile: string

    @param histrjFile: the MaterialsStudio HIS (Discover) or TRJ (Forcite) file name of the trajectory to convert.
    @type histrjFile: string

    @param outputFile: the name of MMTK NetCDF trajectory output file.
    @type outputFile: string

    @param subselection: if not None, list of the indexes (integer >= 1) of the atoms to select when writing out
    the MMTK trajectory. The order being the one defined in the XTD/XSD file.
    @type subselection: list
    """

    self.module = module
    self.xtdxsdFile = xtdxsdFile
    self.histrjFile = histrjFile
    self.outputFile = outputFile
    self.subselection = subselection

    # The conversion is performed entirely at construction time.
    self.__convert()
1058
1060
def createCluster(self, at, clust):
    """Recursively collects into clust all the atoms connected to at.

    Atoms already assigned to a cluster are tagged with a 'cluster' key so
    that each atom is visited at most once.

    @param at: an atom record (dict with a 'connections' entry) from
        self.atomContents.
    @param clust: the list the connected atom records are appended to.
    """

    # 'in' test instead of dict.has_key, which no longer exists in Python 3
    # (identical behaviour in Python 2).
    if 'cluster' in at:
        return

    at['cluster'] = True

    clust.append(at)

    # Follow the connectivity graph depth-first.
    for cAt in at['connections']:
        self.createCluster(self.atomContents[self.atomIndexes[cAt]], clust)
1070
1072 """Reads the Materials Studio XTD or XSD file and set up the universe from which the NetCDF
1073 MMTK trajectory will be written.
1074
1075 @note: the XTD and XSD file are xml file.
1076 """
1077
1078 xmlFile = xml.dom.minidom.parse(self.xtdxsdFile)
1079
1080 identityMapping = xmlFile.getElementsByTagName('IdentityMapping')[0]
1081
1082 atomsTag = identityMapping.getElementsByTagName('Atom3d')
1083
1084 try:
1085 spaceGroup = identityMapping.getElementsByTagName('SpaceGroup')[0]
1086
1087 self.normalizedCell = N.zeros((9,), typecode = N.Float)
1088 self.normalizedCell[0:3] = Vector([float(v) for v in spaceGroup.getAttribute('AVector').split(',')]).normal()
1089 self.normalizedCell[3:6] = Vector([float(v) for v in spaceGroup.getAttribute('BVector').split(',')]).normal()
1090 self.normalizedCell[6:9] = Vector([float(v) for v in spaceGroup.getAttribute('CVector').split(',')]).normal()
1091
1092 self.universe = ParallelepipedicPeriodicUniverse()
1093
1094 except:
1095 LogMessage('No SpaceGroup tag found in the xtd/xsd file. The MMTK universe created will be an infinite universe.')
1096 self.universe = InfiniteUniverse()
1097
1098 self.clusters = []
1099 if self.subselection is None:
1100
1101 bondsTag = xmlFile.getElementsByTagName('IdentityMapping')[0].getElementsByTagName('Bond')
1102 bondsContents = {}
1103 for bd in bondsTag:
1104 bdIndex = int(bd.getAttribute('ID'))
1105 bondsContents[bdIndex] = [int(v) for v in bd.getAttribute('Connects').split(',')]
1106
1107 self.atomIndexes = {}
1108 self.atomContents = []
1109 comp = 0
1110 for at in atomsTag:
1111 atId = int(at.getAttribute('ID'))
1112 element = str(at.getAttribute('Components')).split(',')[0].strip()
1113 coord = N.array([float(v) for v in at.getAttribute('XYZ').split(',')])*Units.Ang
1114
1115 connections = []
1116 if at.hasAttribute('Connections'):
1117 connectedBonds = [int(v) for v in at.getAttribute('Connections').split(',')]
1118 for b in connectedBonds:
1119 if bondsContents.has_key(b):
1120 for bb in bondsContents[b]:
1121 if bb != atId:
1122 connections.append(bb)
1123
1124 data = {'coord' : coord, 'element' : element, 'connections' : connections, 'serial_number' : comp}
1125 self.atomContents.append(data)
1126
1127 self.atomIndexes[atId] = comp
1128
1129 comp +=1
1130
1131 for at in self.atomContents:
1132
1133 if at.has_key('cluster'):
1134 continue
1135
1136 else:
1137 clust = []
1138 self.createCluster(at, clust)
1139 self.clusters.append(clust)
1140
1141 else:
1142
1143 for cInfo in self.subselection:
1144
1145 subCluster = []
1146 for ind in cInfo:
1147 atXML = atomsTag[ind-1]
1148 element = str(atXML.getAttribute('Components')).split(',')[0].strip()
1149 data = {'coord' : N.array([float(v) for v in atXML.getAttribute('XYZ').split(',')])*Units.Ang,
1150 'element' : element,
1151 'serial_number' : ind - 1}
1152 subCluster.append(data)
1153 self.clusters.append(subCluster)
1154
1155 xmlFile.unlink()
1156
1157 for c in range(len(self.clusters)):
1158 clust = self.clusters[c]
1159 atoms = []
1160 atomNames = {}
1161 for atom in clust:
1162 if atomNames.has_key(atom['element']):
1163 atomNames[atom['element']] += 1
1164 else:
1165 atomNames[atom['element']] = 1
1166 atoms.append(Atom(atom['element'], name = str(atomNames[atom['element']])))
1167
1168 clustName = ''
1169 for k, v in atomNames.items():
1170 clustName += '%s%d' % (k,v)
1171 clustName += '_' + str(c+1)
1172 self.universe.addObject(AtomCluster(atoms, name = clustName))
1173
1175 """Reads a Materials Studio HIS file and fills up the NetCDF trajectory file.
1176 """
1177
1178 selCoordinates = N.zeros((self.universe.numberOfAtoms(),3), typecode = N.Float)
1179 selVelocities = N.zeros((self.universe.numberOfAtoms(),3), typecode = N.Float)
1180
1181
1182 trajectory = Trajectory(self.universe, self.outputFile, mode = 'w', comment = 'From Discover trajectory file')
1183
1184 snapshot = SnapshotGenerator(self.universe, actions = [TrajectoryOutput(trajectory,["all"], 0, None, 1)])
1185
1186
1187 hisFile = open(self.histrjFile,'rb')
1188
1189
1190 rec = '!4xi8x'
1191 recSize = calcsize(rec)
1192 hisFile.read(recSize)
1193
1194
1195 rec = '!' + '4s' * 20 + 'd8x'
1196 recSize = calcsize(rec)
1197 data = struct.unpack(rec, hisFile.read(recSize))
1198 Vershn = data[20]
1199
1200 LogMessage('info','Vershn -- > %s' % Vershn,['file'])
1201
1202
1203 rec = '!' + '4s' * 20 + '8x'
1204 recSize = calcsize(rec)
1205 hisFile.read(recSize)
1206
1207
1208 rec = '!' + '4s' * 20 + '8x'
1209 recSize = calcsize(rec)
1210 hisFile.read(recSize)
1211
1212
1213 rec = '!i'
1214 recSize = calcsize(rec)
1215 data = struct.unpack(rec, hisFile.read(recSize))
1216 NAtTyp = data[0]
1217
1218 rec = '!' + '4s' * NAtTyp + '%sd8x' % NAtTyp
1219 recSize = calcsize(rec)
1220 hisFile.read(recSize)
1221
1222
1223 rec = '!i'
1224 recSize = calcsize(rec)
1225 data = struct.unpack(rec, hisFile.read(recSize))
1226 NNmRes = data[0]
1227
1228 rec = '!' + '4s' * NNmRes + '8x'
1229 recSize = calcsize(rec)
1230 hisFile.read(recSize)
1231
1232
1233 rec = '!i'
1234 recSize = calcsize(rec)
1235 data = struct.unpack(rec, hisFile.read(recSize))
1236 NAtoms = data[0]
1237
1238 rec = '!%si' % NAtoms
1239 recSize = calcsize(rec)
1240 hisFile.read(recSize)
1241
1242 if Vershn < 2.9:
1243 rec = '!' + '4s' * NAtoms
1244
1245 else:
1246 rec = '!' + '5s' * NAtoms
1247
1248 recSize = calcsize(rec)
1249 hisFile.read(recSize)
1250
1251 hisFile.read(calcsize('!8x'))
1252
1253
1254 rec = '!ii'
1255 recSize = calcsize(rec)
1256 data = struct.unpack(rec, hisFile.read(recSize))
1257
1258 NAtMov = data[1]
1259
1260 if Vershn >= 2.6:
1261 rec = '!%si' % NAtMov
1262 recSize = calcsize(rec)
1263 data = struct.unpack(rec, hisFile.read(recSize))
1264 movableatoms = data
1265 LogMessage('info','movableatoms -- > %s' % str(movableatoms),['file'])
1266
1267 hisFile.read(calcsize('!8x'))
1268
1269
1270 rec = '!i'
1271 recSize = calcsize(rec)
1272 data = struct.unpack(rec, hisFile.read(recSize))
1273 NMol = data[0]
1274
1275 rec = '!%si%si' % (NMol,NMol)
1276 recSize = calcsize(rec)
1277 hisFile.read(recSize)
1278
1279 hisFile.read(calcsize('!8x'))
1280
1281
1282 rec = '!i'
1283 recSize = calcsize(rec)
1284 data = struct.unpack(rec, hisFile.read(recSize))
1285 NRes = data[0]
1286
1287 rec = '!%si%si' % (NRes*2,NRes)
1288 recSize = calcsize(rec)
1289 hisFile.read(recSize)
1290
1291 hisFile.read(calcsize('!8x'))
1292
1293
1294 rec = '!i'
1295 recSize = calcsize(rec)
1296 data = struct.unpack(rec, hisFile.read(recSize))
1297 NBonds = data[0]
1298
1299 if NBonds > 0 :
1300 rec = '!%si' % 2*NBonds
1301 recSize = calcsize(rec)
1302 hisFile.read(recSize)
1303
1304 hisFile.read(calcsize('!8x'))
1305
1306
1307 rec = '!4149di4di6d6i8x'
1308 recSize = calcsize(rec)
1309 data = struct.unpack(rec, hisFile.read(recSize))
1310
1311
1312 rec = '!idii8x'
1313 recSize = calcsize(rec)
1314 data = struct.unpack(rec, hisFile.read(recSize))
1315 NEner, timestep, frequency, startingstep = data
1316
1317 LogMessage('info','NEner -- > %s' % NEner,['file'])
1318 LogMessage('info','timestep -- > %s fs' % timestep,['file'])
1319 LogMessage('info','frequency -- > %s step(s)' % frequency,['file'])
1320 LogMessage('info','startingstep -- > %s' % startingstep,['file'])
1321
1322
1323 nrjBlockSize = 59+5*NMol+NEner+NMol*NEner
1324 rec = '!%sd8x' % nrjBlockSize
1325 recSize = calcsize(rec)
1326 data = struct.unpack(rec, hisFile.read(recSize))
1327 totalenergy = data[0]
1328
1329 LogMessage('info','totalenergy -- > %s' % totalenergy,['file'])
1330
1331 nAtoms3 = 3*NAtoms
1332
1333
1334 rec = '!%sf8x' % nAtoms3
1335 recSize = calcsize(rec)
1336 data = struct.unpack(rec, hisFile.read(recSize))
1337 firstatomxyz = data[0:3]
1338
1339 LogMessage('info','firstatomxyz -- > %s' % str(firstatomxyz),['file'])
1340
1341
1342 rec = '!%sf8x' % nAtoms3
1343 recSize = calcsize(rec)
1344 data = struct.unpack(rec, hisFile.read(recSize))
1345 firstatomvel = data[0:3]
1346
1347 LogMessage('info','firstatomvel -- > %s' % str(firstatomvel),['file'])
1348
1349
1350 frame = 0
1351 while True:
1352
1353 try:
1354
1355 rec = '!i8x'
1356 recSize = calcsize(rec)
1357 hisFile.read(recSize)
1358
1359
1360 rec = '!%sd8x' % nrjBlockSize
1361 recSize = calcsize(rec)
1362 hisFile.read(recSize)
1363
1364
1365 rec = '!6d9d8x'
1366 recSize = calcsize(rec)
1367 data = struct.unpack(rec, hisFile.read(recSize))
1368
1369 cell = N.array(data[6:])*Units.Ang
1370 LogMessage('info','cell -- > %s' % str(cell),['file'])
1371
1372
1373 rec = '!%sf8x' % nAtoms3
1374 recSize = calcsize(rec)
1375 data = struct.unpack(rec, hisFile.read(recSize))
1376 xyz = N.array(data).reshape((NAtoms,3))*Units.Ang
1377
1378
1379 rec = '!%sf' % nAtoms3
1380 recSize = calcsize(rec)
1381 data = struct.unpack(rec, hisFile.read(recSize))
1382 vel = N.array(data).reshape((NAtoms,3))*Units.Ang
1383
1384 hisFile.read(calcsize('!8x'))
1385
1386 except:
1387 break
1388
1389 else:
1390 frame += 1
1391
1392 comp = 0
1393 for clust in self.clusters:
1394 for subClust in clust:
1395 selCoordinates[comp,:] = xyz[subClust['serial_number'],:]
1396 selVelocities[comp,:] = vel[subClust['serial_number'],:]
1397 comp += 1
1398
1399 if not self.universe.is_periodic:
1400 cell = None
1401 self.universe.setConfiguration(Configuration(self.universe, selCoordinates, cell))
1402 self.universe.setVelocities(ParticleVector(self.universe, selVelocities))
1403
1404 snapshot(data = {'time': (startingstep + frequency*frame)*timestep*Units.fs})
1405
1406
1407 hisFile.close()
1408
1409 trajectory.close()
1410
        """Reads a Materials Studio Forcite TRJ file and fills up the NetCDF trajectory file.

        The TRJ file is a big-endian binary file; every struct format below starts
        with '!' and uses 'x' pads to skip record delimiters. The header version
        (ICNTRL[0]: 2000/2010/3000) decides the floating-point precision and the
        per-frame record layout.
        """

        # Per-frame scratch arrays for the selected atoms' coordinates/velocities.
        selCoordinates = N.zeros((self.universe.numberOfAtoms(),3), typecode = N.Float)
        selVelocities = N.zeros((self.universe.numberOfAtoms(),3), typecode = N.Float)

        # Output MMTK trajectory and the generator writing one frame per call.
        trajectory = Trajectory(self.universe, self.outputFile, mode = 'w', comment = 'From Forcite trajectory file')

        snapshot = SnapshotGenerator(self.universe, actions = [TrajectoryOutput(trajectory,["all"], 0, None, 1)])

        trjFile = open(self.histrjFile,'rb')

        # File header: 4-char magic plus 20 control ints.
        rec = '!4x4s20i8x'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        HDR = data[0]
        LogMessage('info','HDR -- > %s' % HDR,['file'])

        ICNTRL = data[1:]
        LogMessage('info','ICNTRL -- > %s' % str(ICNTRL),['file'])

        # Version 2000 stores single-precision floats; 2010 and 3000 store doubles.
        if ICNTRL[0] == 2000:
            prec = 'f'

        elif ICNTRL[0] == 2010:
            prec = 'd'

        elif ICNTRL[0] == 3000:
            prec = 'd'

        else:
            LogMessage('warning','Unknown trj version number: %s.' % ICNTRL[0],['gui'])
            return

        # Title lines: count, then 80-char strings; skipped.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))
        NTRJTI, = data

        rec = '!' + '80s'* NTRJTI
        recSize = struct.calcsize(rec)
        trjFile.read(recSize)

        trjFile.read(calcsize('!8x'))

        # Energy-expression title lines: count, then 80-char strings; skipped.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))
        NEEXTI, = data

        rec = '!' + '80s'* NEEXTI
        recSize = struct.calcsize(rec)
        trjFile.read(recSize)

        trjFile.read(calcsize('!8x'))

        # Simulation flags: periodicity, molecular crystal, canonical ensemble,
        # cell definition, thermodynamics output, Nose thermostat, NPT-canonical,
        # temperature damping. They gate which optional records appear per frame.
        rec = '!8i8x'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        PERTYPE, MOLXTL, LCANON, DEFCEL, PRTTHRM, LNOSE, LNPECAN, LTMPDAMP = data

        LogMessage('info','PERTYPE -- > %s' % PERTYPE,['file'])
        LogMessage('info','MOLXTL -- > %s' % MOLXTL,['file'])
        LogMessage('info','LCANON -- > %s' % LCANON,['file'])
        LogMessage('info','DEFCEL -- > %s' % DEFCEL,['file'])
        LogMessage('info','PRTTHRM -- > %s' % PRTTHRM,['file'])
        LogMessage('info','LNOSE -- > %s' % LNOSE,['file'])
        LogMessage('info','LNPECAN -- > %s' % LNPECAN,['file'])
        LogMessage('info','LTMPDAMP -- > %s' % LTMPDAMP,['file'])

        # Number of "files used" entries, then per-entry ints and 8-char names.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))
        NFLUSD, = data

        rec = '!%si%si' % (NFLUSD, NFLUSD) + '8s'* NFLUSD + '8x'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        MVATPF = data[0:NFLUSD]
        NATPFU = data[NFLUSD:2*NFLUSD]
        DECUSD = data[2*NFLUSD:3*NFLUSD]

        LogMessage('info','MVATPF -- > %s' % str(MVATPF),['file'])
        LogMessage('info','NATPFU -- > %s' % str(NATPFU),['file'])
        LogMessage('info','DECUSD -- > %s' % str(DECUSD),['file'])

        # Total number of movable atoms; only those appear in the frame records.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        TOTMOV = data[0]

        LogMessage('info','TOTMOV -- > %s' % str(TOTMOV),['file'])

        # Movable-atom offsets, converted from 1-based to 0-based indices.
        rec = '!%si8x' % TOTMOV
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        MVOFST = [d-1 for d in data]

        LogMessage('info','MVOFST -- > %s' % str(MVOFST),['file'])

        # Free-form energy-expression text of length LEEXTI; skipped.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        LEEXTI, = data

        rec = '!' + '%ss'% LEEXTI
        recSize = struct.calcsize(rec)
        trjFile.read(recSize)

        trjFile.read(calcsize('!8x'))

        # Free-form parameter text of length LPARTI; skipped.
        rec = '!i'
        recSize = struct.calcsize(rec)
        data = struct.unpack(rec, trjFile.read(recSize))

        LPARTI, = data

        rec = '!' + '%ss'% LPARTI
        recSize = struct.calcsize(rec)
        trjFile.read(recSize)

        trjFile.read(calcsize('!8x'))

        # Frame loop; a failed read/unpack (bare except below) ends the loop.
        while True:

            try:

                # Per-frame scalar block; layout and the index of the
                # "velocities written" flag depend on the file version.
                if ICNTRL[0] == 2000:
                    rec = '!%si33%s5i8x' % (prec, prec)
                    velInd = 37

                elif ICNTRL[0] == 2010:
                    rec = '!%si57%s6i8x' % (prec, prec)
                    velInd = 61

                elif ICNTRL[0] == 3000:
                    rec = '!%si58%s6i8x' % (prec, prec)
                    velInd = 62

                recSize = struct.calcsize(rec)
                data = struct.unpack(rec, trjFile.read(recSize))

                CurrentTime = data[0]
                itstep = data[1]
                Temp = data[2]
                AvgTemp = data[3]
                TimeStep = data[4]
                InitialTemp = data[5]
                FinalTemp = data[6]
                TotalPE = data[7]
                VelocityWritten = data[velInd]

                # Version 2000 never stores forces; newer versions carry a flag.
                if ICNTRL[0] == 2000:
                    ForcesWritten = False

                elif ICNTRL[0] == 2010:
                    ForcesWritten = data[62]
                elif ICNTRL[0] == 3000:
                    ForcesWritten = data[63]

                # Pressure/volume block (12 values: current then averaged).
                rec = '!12%s8x' % prec
                recSize = struct.calcsize(rec)
                data = struct.unpack(rec, trjFile.read(recSize))

                Press = data[0]
                Volume = data[1]
                Junk = data[2:5]
                GyrationRadius = data[5]
                AvgPress = data[6]
                AvgVolume = data[7]
                Junk = data[8:11]
                AvgGyrationRadius = data[11]

                # Optional thermostat block (Nose or velocity-scaling variables).
                if LCANON:
                    rec = '!4%s8x' % prec
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    if LNOSE:
                        snose = data[0]
                        snoseh = data[1]
                        dsstot = data[2]
                        dqcanonNose = data[3]

                    else:
                        signose = data[0]
                        zfrict = data[1]
                        dzprfrict = data[2]
                        dqcanon = data[3]

                # Optional cell block: a/b/c lengths scale the normalized cell
                # directions read from the XTD file.
                # NOTE(review): if DEFCEL <= 0 for a periodic universe, 'cell'
                # is never assigned and the setConfiguration call below would
                # raise NameError -- confirm DEFCEL is always set for periodic runs.
                if DEFCEL > 0:
                    rec = '!22%s8x' % prec
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    a = data[2]*Units.Ang
                    b = data[3]*Units.Ang
                    c = data[4]*Units.Ang

                    cell = N.zeros((9,), typecode = N.Float)
                    cell[0:3] = a*self.normalizedCell[0:3]
                    cell[3:6] = b*self.normalizedCell[3:6]
                    cell[6:9] = c*self.normalizedCell[6:9]

                # Optional periodicity block; content discarded.
                if PERTYPE > 0:
                    rec = '!i14%s8x' % prec
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    NUnitCellAtoms = data[0]
                    Junk = data[1:15]

                # Optional NPT-canonical block; skipped.
                if LNPECAN:
                    rec = '!3%s8x' % prec
                    recSize = struct.calcsize(rec)
                    trjFile.read(recSize)

                # Optional temperature-damping block; skipped.
                if LTMPDAMP:
                    rec = '!%s8x' % prec
                    recSize = struct.calcsize(rec)
                    trjFile.read(recSize)

                # X coordinates of the movable atoms, Angstrom -> nm.
                rec = '!%s%s' % (TOTMOV, prec)
                recSize = struct.calcsize(rec)
                data = struct.unpack(rec, trjFile.read(recSize))

                XCOORD = N.array(data[0:TOTMOV])*Units.Ang
                trjFile.read(calcsize('!8x'))

                # Y coordinates.
                rec = '!%s%s' % (TOTMOV, prec)
                recSize = struct.calcsize(rec)
                data = struct.unpack(rec, trjFile.read(recSize))

                YCOORD = N.array(data[0:TOTMOV])*Units.Ang
                trjFile.read(calcsize('!8x'))

                # Z coordinates.
                rec = '!%s%s' % (TOTMOV, prec)
                recSize = struct.calcsize(rec)
                data = struct.unpack(rec, trjFile.read(recSize))

                ZCOORD = N.array(data[0:TOTMOV])*Units.Ang
                trjFile.read(calcsize('!8x'))

                # Optional velocity blocks (x, y, z).
                # NOTE(review): scaled by Units.Ang only -- the time part of the
                # velocity unit is not converted; confirm against the TRJ spec.
                # NOTE(review): if VelocityWritten is false, XVelocity/YVelocity/
                # ZVelocity stay unbound (or stale from a previous frame) yet are
                # read in the else-branch below -- verify against real files.
                if VelocityWritten:

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    XVelocity = N.array(data[0:TOTMOV])*Units.Ang
                    trjFile.read(calcsize('!8x'))

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    YVelocity = N.array(data[0:TOTMOV])*Units.Ang
                    trjFile.read(calcsize('!8x'))

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    data = struct.unpack(rec, trjFile.read(recSize))

                    ZVelocity = N.array(data[0:TOTMOV])*Units.Ang
                    trjFile.read(calcsize('!8x'))

                # Optional force blocks (x, y, z); skipped.
                if ForcesWritten:

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    trjFile.read(recSize)

                    trjFile.read(calcsize('!8x'))

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    trjFile.read(recSize)

                    trjFile.read(calcsize('!8x'))

                    rec = '!%s%s' % (TOTMOV, prec)
                    recSize = struct.calcsize(rec)
                    trjFile.read(recSize)

                    trjFile.read(calcsize('!8x'))

            # End-of-file sentinel: any read/unpack failure terminates the loop.
            except:
                break

            else:

                # Map the frame data onto the selected atoms: movable atoms get
                # the coordinates/velocities just read; fixed atoms keep the
                # static coordinates collected from the XTD file.
                comp = 0
                for clust in self.clusters:

                    for subClust in clust:
                        if subClust['serial_number'] in MVOFST:
                            ind = MVOFST.index(subClust['serial_number'])
                            selCoordinates[comp,0] = XCOORD[ind]
                            selCoordinates[comp,1] = YCOORD[ind]
                            selCoordinates[comp,2] = ZCOORD[ind]
                            selVelocities[comp,0] = XVelocity[ind]
                            selVelocities[comp,1] = YVelocity[ind]
                            selVelocities[comp,2] = ZVelocity[ind]

                        else:
                            selCoordinates[comp,:] = subClust['coord']

                        comp += 1

                # Infinite universes take no cell.
                if not self.universe.is_periodic:
                    cell = None

                self.universe.setConfiguration(Configuration(self.universe, selCoordinates, cell))
                self.universe.setVelocities(ParticleVector(self.universe, selVelocities))

                snapshot(data = {'time': CurrentTime})

        trjFile.close()

        trajectory.close()
1763
        """Performs the actual conversion.

        Builds the universe from the XTD file, then reads the frame data from the
        trajectory file whose format depends on the Materials Studio module used.
        """

        # Build the universe and the cluster list from the XTD topology file.
        self.readXTDFile()

        # Dispatch on the generating module: Discover writes HIS files,
        # Forcite writes TRJ files.
        if self.module == 'Discover':
            self.readHISFile()

        elif self.module == 'Forcite':
            self.readTRJFile()
1775
1777 """Converts a NAMD Trajectory into a MMTK NetCDFFile.
1778
1779 Comments:
1780 - this code is based on the original converter written by Konrad Hinsen.
1781 """
1782
1783 - def __init__(self, pdbFile, dcdFile, xstFile, outputFile):
1784 """
1785 The constructor. Will do the conversion.
1786
1787 @param pdbFile: the NAMD PDB file name of a frame of the trajectory to convert.
1788 @type pdbFile: string
1789
1790 @param dcdFile: the NAMD DCD file name of the trajectory to convert.
1791 @type dcdFile: string
1792
1793 @param xstFile: the NAMD XSTfile name of the trajectory to convert.
1794 @type xstFile: string
1795
1796 @param outputFile: the name of MMTK NetCDF trajectory output file.
1797 @type outputFile: string
1798 """
1799
1800 self.pdbFile = pdbFile
1801 self.dcdFile = dcdFile
1802 self.xstFile = xstFile
1803 self.outputFile = outputFile
1804
1805
1806 self.__convert()
1807
        """Performs the actual conversion."""

        # If an XST file is given, read the per-frame cell vectors from it
        # (skipping the 3 header lines) and build a periodic universe from the
        # first cell; otherwise the trajectory is treated as non-periodic.
        if self.xstFile is not None:
            xst = open(self.xstFile, 'r')
            data = xst.readlines()[3:]
            xst.close()
            xstData = []
            for line in data:
                # Columns 1-9 of each XST line are the 3x3 cell matrix in Angstrom.
                d = N.reshape(N.array([float(v) for v in line.split()[1:10]]),(3,3))
                xstData.append([Vector(v)*Units.Ang for v in d])

            universe = ParallelepipedicPeriodicUniverse(xstData[0])

        else:
            universe = InfiniteUniverse()

        # Open the DCD file and read the first frame; istart/nsavc/delta give
        # the first step number, the save frequency and the integration timestep.
        dcd = DCDFile(self.dcdFile)
        step = dcd.istart
        step_increment = dcd.nsavc
        dt = dcd.delta
        unit_cell, x, y, z = dcd.readStep()

        # The PDB file provides the topology; atom order must match the DCD file.
        conf = PDBConfiguration(self.pdbFile)
        universe.addObject(conf.createAll())

        trajectory = Trajectory(universe, self.outputFile, mode = 'w', comment = dcd.title)
        snapshot = SnapshotGenerator(universe, actions=[TrajectoryOutput(trajectory, ["all"], 0, None, 1)])

        # Write the configuration in place through the universe's raw array,
        # one DCD frame per snapshot, until DCDFile signals EndOfFile.
        # NOTE(review): for periodic universes this indexes xstData[comp] per
        # frame -- a DCD with more frames than XST lines would raise IndexError.
        conf_array = universe.configuration().array
        comp = 0
        while True:
            conf_array[:, 0] = x
            conf_array[:, 1] = y
            conf_array[:, 2] = z

            if universe.is_periodic:
                universe.setShape(xstData[comp])

            step_data = {'time': step*dt, 'step': step}
            snapshot(data = step_data)
            step += step_increment

            try:
                unit_cell, x, y, z = dcd.readStep()
                comp += 1

            except EndOfFile:
                break

        trajectory.close()
1866
1868 """Converts a VASP Trajectory into a MMTK NetCDFFile."""
1869
1870 - def __init__(self, contcarFile, xdatcarFile, outputFile, atomContents):
1871 """
1872 The constructor. Will do the conversion.
1873
1874 @param contcarFile: the VASP CONTCAR or POSCAR file name of the trajectory to convert.
1875 @type contcarFile: string
1876
1877 @param xdatcarFile: the VASP XDATCAR file name of the trajectory to convert.
1878 @type xdatcarFile: string
1879
1880 @param outputFile: the name of MMTK NetCDF trajectory output file.
1881 @type outputFile: string
1882
1883 @param atomContents: List of the element names (string) in the order they appear in the
1884 trajectory.
1885 @type atomContents: list
1886 """
1887
1888 self.contcarFile = contcarFile
1889 self.xdatcarFile = xdatcarFile
1890 self.outputFile = outputFile
1891 self.atomContents = atomContents
1892
1893
1894 self.__convert()
1895
        """Performs the actual conversion."""

        # The CONTCAR/POSCAR file provides the system name, the scale factor,
        # the cell vectors and the per-element atom counts.
        contcarFile = open(self.contcarFile, 'r')

        # First line may contain "System= <name>"; fall back to a generic name.
        firstLine = contcarFile.readline().split()

        try:
            clustName = firstLine[firstLine.index('System=')+1]
        except:
            # NOTE(review): bare except -- also hides unexpected errors, not
            # just a missing 'System=' token.
            clustName = 'Cluster'

        # Universal scaling factor applied to the cell vectors.
        scale_factor = float(contcarFile.readline().split()[0])

        # The three direct lattice vectors, converted from Angstrom to nm.
        dirA = Vector([float(v) for v in contcarFile.readline().split()])*Units.Ang*scale_factor
        dirB = Vector([float(v) for v in contcarFile.readline().split()])*Units.Ang*scale_factor
        dirC = Vector([float(v) for v in contcarFile.readline().split()])*Units.Ang*scale_factor

        universe = ParallelepipedicPeriodicUniverse((dirA, dirB, dirC), None)

        # Number of atoms of each element, in the order given by atomContents.
        numberOfAtomsPerType = [int(v) for v in contcarFile.readline().split()]

        contcarFile.close()

        # Build one MMTK atom per trajectory atom, named element+index.
        mmtkAtomList = []

        for i in range(len(self.atomContents)):
            for l in range(numberOfAtomsPerType[i]):
                mmtkAtomList.append(Atom(self.atomContents[i], name = self.atomContents[i] + str(l+1)))

        universe.addObject(AtomCluster(mmtkAtomList, name = clustName))

        # The XDATCAR file provides the frames; its atom count must match.
        xdatcarFile = open(self.xdatcarFile, 'r')
        nAtoms = int(xdatcarFile.readline().split()[0])

        if nAtoms != universe.numberOfAtoms():
            raise Error('Wrong number of atoms.')

        # Timestep from the XDATCAR header, then skip the 3 remaining header lines.
        # NOTE(review): scaled by Units.s -- VASP's POTIM is usually in fs;
        # confirm the expected unit of this header field.
        timestep = float(xdatcarFile.readline().split()[-1])*Units.s
        xdatcarFile.readline()
        xdatcarFile.readline()
        xdatcarFile.readline()

        trajectory = Trajectory(universe, self.outputFile, mode = 'w', comment = 'From VASP file')
        snapshot = SnapshotGenerator(universe, actions = [TrajectoryOutput(trajectory,["all"], 0, None, 1)])

        # Frame loop: each frame is a separator line followed by one fractional
        # coordinate line per atom.
        step = 0
        while True:
            t = step*timestep

            # Frame separator line; an empty read means end of file.
            junk = xdatcarFile.readline()

            if not junk:
                break

            try:

                for at in range(len(universe.atomList())):
                    line = xdatcarFile.readline()

                    # Fractional (box) coordinates converted to real coordinates.
                    coord = Vector([float(v) for v in line.split()[0:3]])

                    coord = universe.boxToRealCoordinates(coord)

                    universe.atomList()[at].setPosition(coord)

                snapshot(data = {'time': t})
                step += 1

            except:
                # NOTE(review): bare except used as a truncated-frame sentinel;
                # it also hides real parsing errors.
                break

        xdatcarFile.close()
        trajectory.close()
1982