1 """This module encapsulates a document stored in a GNUmed database."""
2
3 __author__ = "Karsten Hilbert <Karsten.Hilbert@gmx.net>"
4 __license__ = "GPL v2 or later"
5
6 import sys, os, shutil, os.path, types, time, logging
7
8
9 if __name__ == '__main__':
10 sys.path.insert(0, '../../')
11 from Gnumed.pycommon import gmExceptions
12 from Gnumed.pycommon import gmBusinessDBObject
13 from Gnumed.pycommon import gmPG2
14 from Gnumed.pycommon import gmTools
15 from Gnumed.pycommon import gmMimeLib
16 from Gnumed.pycommon import gmDateTime
17
18
19 _log = logging.getLogger('gm.docs')
20
21 MUGSHOT=26
22 DOCUMENT_TYPE_VISUAL_PROGRESS_NOTE = u'visual progress note'
23 DOCUMENT_TYPE_PRESCRIPTION = u'prescription'
24
26 """Represents a folder with medical documents for a single patient."""
27
29 """Fails if
30
31 - patient referenced by aPKey does not exist
32 """
33 self.pk_patient = aPKey
34 if not self._pkey_exists():
35 raise gmExceptions.ConstructorError, "No patient with PK [%s] in database." % aPKey
36
37
38
39
40
41
42
43 _log.debug('instantiated document folder for patient [%s]' % self.pk_patient)
44
47
48
49
50 def _pkey_exists(self):
51 """Does this primary key exist ?
52
53 - returns True if the patient exists, None otherwise
54 """
55
56 rows, idx = gmPG2.run_ro_queries(queries = [
57 {'cmd': u"select exists(select pk from dem.identity where pk = %s)", 'args': [self.pk_patient]}
58 ])
59 if not rows[0][0]:
60 _log.error("patient [%s] not in demographic database" % self.pk_patient)
61 return None
62 return True
63
64
65
67 cmd = u"""
68 SELECT pk_doc
69 FROM blobs.v_doc_med
70 WHERE
71 pk_patient = %(pat)s
72 AND
73 type = %(typ)s
74 AND
75 ext_ref = %(ref)s
76 ORDER BY
77 clin_when DESC
78 LIMIT 1
79 """
80 args = {
81 'pat': self.pk_patient,
82 'typ': DOCUMENT_TYPE_PRESCRIPTION,
83 'ref': u'FreeDiams'
84 }
85 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
86 if len(rows) == 0:
87 _log.info('no FreeDiams prescription available for patient [%s]' % self.pk_patient)
88 return None
89 prescription = cDocument(aPK_obj = rows[0][0])
90 return prescription
91
92 def get_latest_mugshot(self):
93 cmd = u"SELECT pk_obj FROM blobs.v_latest_mugshot WHERE pk_patient = %s"
94 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
95 if len(rows) == 0:
96 _log.info('no mugshots available for patient [%s]' % self.pk_patient)
97 return None
98 return cDocumentPart(aPK_obj = rows[0][0])
99
100 latest_mugshot = property(get_latest_mugshot, lambda x:x)
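# note on the idiom above: property(getter, lambda x:x) exposes a read-only value --
# the lambda acts as a do-nothing setter, so assignment is silently ignored;
# the same pattern is used for the other properties in this module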
101
103 if latest_only:
104 cmd = u"select pk_doc, pk_obj from blobs.v_latest_mugshot where pk_patient=%s"
105 else:
106 cmd = u"""
107 select
108 vdm.pk_doc as pk_doc,
109 dobj.pk as pk_obj
110 from
111 blobs.v_doc_med vdm,
112 blobs.doc_obj dobj
113 where
114 vdm.pk_type = (select pk from blobs.doc_type where name = 'patient photograph')
115 and vdm.pk_patient = %s
116 and dobj.fk_doc = vdm.pk_doc
117 """
118 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_patient]}])
119 return rows
120
122 """return flat list of document IDs"""
123
124 args = {
125 'ID': self.pk_patient,
126 'TYP': doc_type
127 }
128
129 cmd = u"""
130 select vdm.pk_doc
131 from blobs.v_doc_med vdm
132 where
133 vdm.pk_patient = %%(ID)s
134 %s
135 order by vdm.clin_when"""
136
137 if doc_type is None:
138 cmd = cmd % u''
139 else:
140 try:
141 int(doc_type)
142 cmd = cmd % u'and vdm.pk_type = %(TYP)s'
143 except (TypeError, ValueError):
144 cmd = cmd % u'and vdm.pk_type = (select pk from blobs.doc_type where name = %(TYP)s)'
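# two-stage substitution: the Python %-formatting above only fills the lone %s slot
# with the extra type clause; the doubled %%(ID)s placeholder survives it as %(ID)s
# and, like %(TYP)s, is later bound by the database layer from the args dict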
145
146 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
147 doc_ids = []
148 for row in rows:
149 doc_ids.append(row[0])
150 return doc_ids
151
158
160 args = {'pat': self.pk_patient}
161 cmd = _sql_fetch_document_fields % u"""
162 pk_doc IN (
163 SELECT DISTINCT ON (b_vo.pk_doc) b_vo.pk_doc
164 FROM blobs.v_obj4doc_no_data b_vo
165 WHERE
166 pk_patient = %(pat)s
167 AND
168 reviewed IS FALSE
169 )
170 ORDER BY clin_when DESC"""
171 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
172 return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]
173
174 def get_documents(self, doc_type=None, episodes=None, encounter=None, order_by=None, exclude_unsigned=False):
175 """Return list of documents."""
176
177 args = {
178 'pat': self.pk_patient,
179 'type': doc_type,
180 'enc': encounter
181 }
182 where_parts = [u'pk_patient = %(pat)s']
183
184 if doc_type is not None:
185 try:
186 int(doc_type)
187 where_parts.append(u'pk_type = %(type)s')
188 except (TypeError, ValueError):
189 where_parts.append(u'pk_type = (SELECT pk FROM blobs.doc_type WHERE name = %(type)s)')
190
191 if (episodes is not None) and (len(episodes) > 0):
192 where_parts.append(u'pk_episode IN %(epi)s')
193 args['epi'] = tuple(episodes)
194
195 if encounter is not None:
196 where_parts.append(u'pk_encounter = %(enc)s')
197
198 if exclude_unsigned:
199 where_parts.append(u'pk_doc IN (SELECT b_vo.pk_doc FROM blobs.v_obj4doc_no_data b_vo WHERE b_vo.pk_patient = %(pat)s AND b_vo.reviewed IS TRUE)')
200
201 if order_by is None:
202 order_by = u'ORDER BY clin_when'
203
204 cmd = u"%s\n%s" % (_sql_fetch_document_fields % u' AND '.join(where_parts), order_by)
205 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
206
207 return [ cDocument(row = {'pk_field': 'pk_doc', 'idx': idx, 'data': r}) for r in rows ]
208
209 documents = property(get_documents, lambda x:x)
210
211 def add_document(self, document_type=None, encounter=None, episode=None):
212 return create_document(document_type = document_type, encounter = encounter, episode = episode)
213
214 _sql_fetch_document_part_fields = u"select * from blobs.v_obj4doc_no_data where %s"
215
216 class cDocumentPart(gmBusinessDBObject.cBusinessDBObject):
217 """Represents one part of a medical document."""
218
219 _cmd_fetch_payload = _sql_fetch_document_part_fields % u"pk_obj = %s"
220 _cmds_store_payload = [
221 u"""UPDATE blobs.doc_obj SET
222 seq_idx = %(seq_idx)s,
223 comment = gm.nullify_empty_string(%(obj_comment)s),
224 filename = gm.nullify_empty_string(%(filename)s),
225 fk_intended_reviewer = %(pk_intended_reviewer)s,
226 fk_doc = %(pk_doc)s
227 WHERE
228 pk = %(pk_obj)s
229 AND
230 xmin = %(xmin_doc_obj)s
231 RETURNING
232 xmin AS xmin_doc_obj"""
233 ]
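# concurrency detection: the UPDATE above only succeeds if the row's xmin still matches
# the value loaded into this instance; the RETURNING clause hands back the new xmin as
# the next baseline (the optimistic-locking scheme of the cBusinessDBObject base class)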
234 _updatable_fields = [
235 'seq_idx',
236 'obj_comment',
237 'pk_intended_reviewer',
238 'filename',
239 'pk_doc'
240 ]
241
242
243
244 def export_to_file(self, aChunkSize=0, filename=None, target_mime=None, target_extension=None, ignore_conversion_problems=False):
245
246 if self._payload[self._idx['size']] == 0:
247 return None
248
249 if filename is None:
250 suffix = None
251
252 if self._payload[self._idx['filename']] is not None:
253 name, suffix = os.path.splitext(self._payload[self._idx['filename']])
254 suffix = suffix.strip()
255 if suffix == u'':
256 suffix = None
257
258 filename = gmTools.get_unique_filename (
259 prefix = 'gm-doc_obj-page_%s-' % self._payload[self._idx['seq_idx']],
260 suffix = suffix
261 )
262
263 success = gmPG2.bytea2file (
264 data_query = {
265 'cmd': u'SELECT substring(data from %(start)s for %(size)s) FROM blobs.doc_obj WHERE pk=%(pk)s',
266 'args': {'pk': self.pk_obj}
267 },
268 filename = filename,
269 chunk_size = aChunkSize,
270 data_size = self._payload[self._idx['size']]
271 )
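# the %(start)s and %(size)s placeholders in the query are not part of 'args' here --
# gmPG2.bytea2file presumably fills them in as it fetches the BLOB chunk by chunk
# (chunk_size) and streams it into 'filename'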
272
273 if not success:
274 return None
275
276 if target_mime is None:
277 return filename
278
279 if target_extension is None:
280 target_extension = gmMimeLib.guess_ext_by_mimetype(mimetype = target_mime)
281
282 target_fname = gmTools.get_unique_filename (
283 prefix = 'gm-doc_obj-page_%s-converted-' % self._payload[self._idx['seq_idx']],
284 suffix = target_extension
285 )
286 _log.debug('attempting conversion: [%s] -> [<%s>:%s]', filename, target_mime, target_fname)
287 if gmMimeLib.convert_file (
288 filename = filename,
289 target_mime = target_mime,
290 target_filename = target_fname
291 ):
292 return target_fname
293
294 _log.warning('conversion failed')
295 if not ignore_conversion_problems:
296 return None
297
298 _log.warning('programmed to ignore conversion problems, hoping receiver can handle [%s]', filename)
299 return filename
300
302 cmd = u"""
303 select
304 reviewer,
305 reviewed_when,
306 is_technically_abnormal,
307 clinically_relevant,
308 is_review_by_responsible_reviewer,
309 is_your_review,
310 coalesce(comment, '')
311 from blobs.v_reviewed_doc_objects
312 where pk_doc_obj = %s
313 order by
314 is_your_review desc,
315 is_review_by_responsible_reviewer desc,
316 reviewed_when desc
317 """
318 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
319 return rows
320
322 return cDocument(aPK_obj = self._payload[self._idx['pk_doc']])
323
324
325
326 def update_data_from_file(self, fname=None):
327
328 if not (os.access(fname, os.R_OK) and os.path.isfile(fname)):
329 _log.error('[%s] is not a readable file' % fname)
330 return False
331
332 gmPG2.file2bytea (
333 query = u"UPDATE blobs.doc_obj SET data = %(data)s::bytea WHERE pk = %(pk)s",
334 filename = fname,
335 args = {'pk': self.pk_obj}
336 )
337
338
339 self.refetch_payload()
340 return True
341
342 def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
343
344 cmd = u"""
345 select pk
346 from blobs.reviewed_doc_objs
347 where
348 fk_reviewed_row = %s and
349 fk_reviewer = (select pk from dem.staff where db_user = current_user)"""
350 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
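# at most one review row per (reviewer, object) pair: if the current user has not
# reviewed this part yet, INSERT a row below, otherwise UPDATE the row found above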
351
352
353 if len(rows) == 0:
354 cols = [
355 u"fk_reviewer",
356 u"fk_reviewed_row",
357 u"is_technically_abnormal",
358 u"clinically_relevant"
359 ]
360 vals = [
361 u'%(fk_row)s',
362 u'%(abnormal)s',
363 u'%(relevant)s'
364 ]
365 args = {
366 'fk_row': self.pk_obj,
367 'abnormal': technically_abnormal,
368 'relevant': clinically_relevant
369 }
370 cmd = u"""
371 insert into blobs.reviewed_doc_objs (
372 %s
373 ) values (
374 (select pk from dem.staff where db_user=current_user),
375 %s
376 )""" % (', '.join(cols), ', '.join(vals))
377
378
379 if len(rows) == 1:
380 pk_row = rows[0][0]
381 args = {
382 'abnormal': technically_abnormal,
383 'relevant': clinically_relevant,
384 'pk_row': pk_row
385 }
386 cmd = u"""
387 update blobs.reviewed_doc_objs set
388 is_technically_abnormal = %(abnormal)s,
389 clinically_relevant = %(relevant)s
390 where
391 pk=%(pk_row)s"""
392 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
393
394 return True
395
397 if self._payload[self._idx['type']] != u'patient photograph':
398 return False
399
400 rows, idx = gmPG2.run_ro_queries (
401 queries = [{
402 'cmd': u'select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s',
403 'args': {'doc_id': self._payload[self._idx['pk_doc']]}
404 }]
405 )
406 self._payload[self._idx['seq_idx']] = rows[0][0]
407 self._is_modified = True
408 self.save_payload()
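# moving the photograph to the highest seq_idx marks it as the most recent one,
# which is presumably what blobs.v_latest_mugshot keys on (see get_latest_mugshot)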
409
411
412 fname = self.export_to_file(aChunkSize = chunksize)
413 if fname is None:
414 return False, ''
415
416 success, msg = gmMimeLib.call_viewer_on_file(fname, block = block)
417 if not success:
418 return False, msg
419
420 return True, ''
421
438
471
472 def delete_document_part(part_pk=None, encounter_pk=None):
473 cmd = u"select blobs.delete_document_part(%(pk)s, %(enc)s)"
474 args = {'pk': part_pk, 'enc': encounter_pk}
475 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
476 return
477
478 _sql_fetch_document_fields = u"""
479 SELECT
480 *,
481 COALESCE (
482 (SELECT array_agg(seq_idx) FROM blobs.doc_obj b_do WHERE b_do.fk_doc = b_vdm.pk_doc),
483 ARRAY[]::integer[]
484 )
485 AS seq_idx_list
486 FROM
487 blobs.v_doc_med b_vdm
488 WHERE
489 %s
490 """
491
492 class cDocument(gmBusinessDBObject.cBusinessDBObject):
493 """Represents one medical document."""
494
495 _cmd_fetch_payload = _sql_fetch_document_fields % u"pk_doc = %s"
496 _cmds_store_payload = [
497 u"""update blobs.doc_med set
498 fk_type = %(pk_type)s,
499 fk_episode = %(pk_episode)s,
500 fk_encounter = %(pk_encounter)s,
501 clin_when = %(clin_when)s,
502 comment = gm.nullify_empty_string(%(comment)s),
503 ext_ref = gm.nullify_empty_string(%(ext_ref)s)
504 where
505 pk = %(pk_doc)s and
506 xmin = %(xmin_doc_med)s""",
507 u"""select xmin_doc_med from blobs.v_doc_med where pk_doc = %(pk_doc)s"""
508 ]
509
510 _updatable_fields = [
511 'pk_type',
512 'comment',
513 'clin_when',
514 'ext_ref',
515 'pk_episode',
516 'pk_encounter'
517 ]
518
519 def refetch_payload(self, ignore_changes=True):
520 try: del self.__has_unreviewed_parts
521 except AttributeError: pass
522
523 return super(cDocument, self).refetch_payload(ignore_changes = ignore_changes)
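# the cached __has_unreviewed_parts value is dropped above so that
# _get_has_unreviewed_parts() re-queries the database after a refetch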
524
526 """Get document descriptions.
527
528 - will return a list of rows
529 """
530 if max_lng is None:
531 cmd = u"SELECT pk, text FROM blobs.doc_desc WHERE fk_doc = %s"
532 else:
533 cmd = u"SELECT pk, substring(text from 1 for %s) FROM blobs.doc_desc WHERE fk_doc=%%s" % max_lng
534 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': [self.pk_obj]}])
535 return rows
536
541
543 cmd = u"update blobs.doc_desc set text = %(desc)s where fk_doc = %(doc)s and pk = %(pk_desc)s"
544 gmPG2.run_rw_queries(queries = [
545 {'cmd': cmd, 'args': {'doc': self.pk_obj, 'pk_desc': pk, 'desc': description}}
546 ])
547 return True
548
550 cmd = u"delete from blobs.doc_desc where fk_doc = %(doc)s and pk = %(desc)s"
551 gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': {'doc': self.pk_obj, 'desc': pk}}])
552 return True
553
558
559 parts = property(_get_parts, lambda x:x)
560
561 def add_part(self, file=None):
562 """Add a part to the document."""
563
564 cmd = u"""
565 insert into blobs.doc_obj (
566 fk_doc, data, seq_idx
567 ) VALUES (
568 %(doc_id)s,
569 ''::bytea,
570 (select coalesce(max(seq_idx)+1, 1) from blobs.doc_obj where fk_doc=%(doc_id)s)
571 )"""
572 rows, idx = gmPG2.run_rw_queries (
573 queries = [
574 {'cmd': cmd, 'args': {'doc_id': self.pk_obj}},
575 {'cmd': u"select currval('blobs.doc_obj_pk_seq')"}
576 ],
577 return_data = True
578 )
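# the two queries above run in one transaction: INSERT a placeholder row with empty
# bytea data, then read back the new primary key via currval() so the binary payload
# can be streamed into it below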
579
580 pk_part = rows[0][0]
581 new_part = cDocumentPart(aPK_obj = pk_part)
582 if not new_part.update_data_from_file(fname=file):
583 _log.error('cannot import binary data from [%s] into document part' % file)
584 gmPG2.run_rw_queries (
585 queries = [
586 {'cmd': u"delete from blobs.doc_obj where pk = %s", 'args': [pk_part]}
587 ]
588 )
589 return None
590 new_part['filename'] = file
591 new_part.save_payload()
592
593 return new_part
594
596
597 new_parts = []
598
599 for filename in files:
600 new_part = self.add_part(file = filename)
601 if new_part is None:
602 msg = 'cannot instantiate document part object'
603 _log.error(msg)
604 return (False, msg, filename)
605 new_parts.append(new_part)
606
607 if reviewer is not None:
608 new_part['pk_intended_reviewer'] = reviewer
609 success, data = new_part.save_payload()
610 if not success:
611 msg = 'cannot set reviewer to [%s]' % reviewer
612 _log.error(msg)
613 _log.error(str(data))
614 return (False, msg, filename)
615
616 return (True, '', new_parts)
617
619 fnames = []
620 for part in self.parts:
621 fname = part.export_to_file(aChunkSize = chunksize)
622 if export_dir is not None:
623 shutil.move(fname, export_dir)
624 fname = os.path.join(export_dir, os.path.split(fname)[1])
625 fnames.append(fname)
626 return fnames
627
628 def _get_has_unreviewed_parts(self):
629 try:
630 return self.__has_unreviewed_parts
631 except AttributeError:
632 pass
633
634 cmd = u"SELECT EXISTS(SELECT 1 FROM blobs.v_obj4doc_no_data WHERE pk_doc = %(pk)s AND reviewed IS FALSE)"
635 args = {'pk': self.pk_obj}
636 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}])
637 self.__has_unreviewed_parts = rows[0][0]
638
639 return self.__has_unreviewed_parts
640
641 has_unreviewed_parts = property(_get_has_unreviewed_parts, lambda x:x)
642
643 def set_reviewed(self, technically_abnormal=None, clinically_relevant=None):
644
645 for part in self.parts:
646 if not part.set_reviewed(technically_abnormal, clinically_relevant):
647 return False
648 return True
649
651 for part in self.parts:
652 part['pk_intended_reviewer'] = reviewer
653 success, data = part.save_payload()
654 if not success:
655 _log.error('cannot set reviewer to [%s]' % reviewer)
656 _log.error(str(data))
657 return False
658 return True
659
687
688 def create_document(document_type=None, encounter=None, episode=None):
689 """Returns new document instance or raises an exception.
690 """
691 cmd = u"""INSERT INTO blobs.doc_med (fk_type, fk_encounter, fk_episode) VALUES (%(type)s, %(enc)s, %(epi)s) RETURNING pk"""
692 try:
693 int(document_type)
694 except (TypeError, ValueError):
695 cmd = u"""
696 INSERT INTO blobs.doc_med (
697 fk_type,
698 fk_encounter,
699 fk_episode
700 ) VALUES (
701 coalesce (
702 (SELECT pk from blobs.doc_type bdt where bdt.name = %(type)s),
703 (SELECT pk from blobs.doc_type bdt where _(bdt.name) = %(type)s)
704 ),
705 %(enc)s,
706 %(epi)s
707 ) RETURNING pk"""
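# fallback used above: if document_type is not a number it is treated as a type name
# and resolved against blobs.doc_type, first by its untranslated name, then by its
# translated name via the database-side _() function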
708
709 args = {'type': document_type, 'enc': encounter, 'epi': episode}
710 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
711 doc = cDocument(aPK_obj = rows[0][0])
712 return doc
713
714 def search_for_documents(patient_id=None, type_id=None, external_reference=None):
715 """Searches for documents with the given patient and type ID."""
716 if patient_id is None:
717 raise ValueError('need patient id to search for document')
718
719 args = {'pat_id': patient_id, 'type_id': type_id, 'ref': external_reference}
720 where_parts = [u'pk_patient = %(pat_id)s']
721
722 if type_id is not None:
723 where_parts.append(u'pk_type = %(type_id)s')
724
725 if external_reference is not None:
726 where_parts.append(u'ext_ref = %(ref)s')
727
728 cmd = _sql_fetch_document_fields % u' AND '.join(where_parts)
729 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = True)
730 return [ cDocument(row = {'data': r, 'idx': idx, 'pk_field': 'pk_doc'}) for r in rows ]
731
732 def delete_document(document_id=None, encounter_id=None):
733
734 cmd = u"SELECT blobs.delete_document(%(pk)s, %(enc)s)"
735 args = {'pk': document_id, 'enc': encounter_id}
736 rows, idx = gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}], return_data = True)
737 if not rows[0][0]:
738 _log.error('cannot delete document [%s]', document_id)
739 return False
740 return True
741
742 def reclassify_documents_by_type(original_type=None, target_type=None):
743
744 _log.debug('reclassifying documents by type')
745 _log.debug('original: %s', original_type)
746 _log.debug('target: %s', target_type)
747
748 if target_type['pk_doc_type'] == original_type['pk_doc_type']:
749 return True
750
751 cmd = u"""
752 update blobs.doc_med set
753 fk_type = %(new_type)s
754 where
755 fk_type = %(old_type)s
756 """
757 args = {u'new_type': target_type['pk_doc_type'], u'old_type': original_type['pk_doc_type']}
758
759 gmPG2.run_rw_queries(queries = [{'cmd': cmd, 'args': args}])
760
761 return True
762
763
764 class cDocumentType(gmBusinessDBObject.cBusinessDBObject):
765 """Represents a document type."""
766 _cmd_fetch_payload = u"""select * from blobs.v_doc_type where pk_doc_type=%s"""
767 _cmds_store_payload = [
768 u"""update blobs.doc_type set
769 name = %(type)s
770 where
771 pk=%(pk_obj)s and
772 xmin=%(xmin_doc_type)s""",
773 u"""select xmin_doc_type from blobs.v_doc_type where pk_doc_type = %(pk_obj)s"""
774 ]
775 _updatable_fields = ['type']
776
777 def set_translation(self, translation=None):
778
779 if translation.strip() == '':
780 return False
781
782 if translation.strip() == self._payload[self._idx['l10n_type']].strip():
783 return True
784
785 rows, idx = gmPG2.run_rw_queries (
786 queries = [
787 {'cmd': u'select i18n.i18n(%s)', 'args': [self._payload[self._idx['type']]]},
788 {'cmd': u'select i18n.upd_tx((select i18n.get_curr_lang()), %(orig)s, %(tx)s)',
789 'args': {
790 'orig': self._payload[self._idx['type']],
791 'tx': translation
792 }
793 }
794 ],
795 return_data = True
796 )
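# i18n.i18n() registers the original type name as a translatable string and
# i18n.upd_tx() stores the given translation for the current database language,
# as suggested by the query arguments above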
797 if not rows[0][0]:
798 _log.error('cannot set translation to [%s]' % translation)
799 return False
800
801 return self.refetch_payload()
802
803 def get_document_types():
804 rows, idx = gmPG2.run_ro_queries (
805 queries = [{'cmd': u"SELECT * FROM blobs.v_doc_type"}],
806 get_col_idx = True
807 )
808 doc_types = []
809 for row in rows:
810 row_def = {'pk_field': 'pk_doc_type', 'idx': idx, 'data': row}
811 doc_types.append(cDocumentType(row = row_def))
812 return doc_types
813
815 args = {'typ': document_type.strip()}
816
817 cmd = u'SELECT pk FROM blobs.doc_type WHERE name = %(typ)s'
818 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
819 if len(rows) == 0:
820 cmd = u'SELECT pk FROM blobs.doc_type WHERE _(name) = %(typ)s'
821 rows, idx = gmPG2.run_ro_queries(queries = [{'cmd': cmd, 'args': args}], get_col_idx = False)
822
823 if len(rows) == 0:
824 return None
825
826 return rows[0]['pk']
827
828 def create_document_type(document_type=None):
829
830 cmd = u'select pk from blobs.doc_type where name = %s'
831 rows, idx = gmPG2.run_ro_queries (
832 queries = [{'cmd': cmd, 'args': [document_type]}]
833 )
834 if len(rows) == 0:
835 cmd1 = u"INSERT INTO blobs.doc_type (name) VALUES (%s) RETURNING pk"
836 rows, idx = gmPG2.run_rw_queries (
837 queries = [{'cmd': cmd1, 'args': [document_type]}],
838 return_data = True
839 )
840 return cDocumentType(aPK_obj = rows[0][0])
841
842 def delete_document_type(document_type=None):
843 if document_type['is_in_use']:
844 return False
845 gmPG2.run_rw_queries (
846 queries = [{
847 'cmd': u'delete from blobs.doc_type where pk=%s',
848 'args': [document_type['pk_doc_type']]
849 }]
850 )
851 return True
852
862
863
864
865 if __name__ == '__main__':
866
867 if len(sys.argv) < 2:
868 sys.exit()
869
870 if sys.argv[1] != u'test':
871 sys.exit()
872
873
875
876 print "----------------------"
877 print "listing document types"
878 print "----------------------"
879
880 for dt in get_document_types():
881 print dt
882
883 print "------------------------------"
884 print "testing document type handling"
885 print "------------------------------"
886
887 dt = create_document_type(document_type = 'dummy doc type for unit test 1')
888 print "created:", dt
889
890 dt['type'] = 'dummy doc type for unit test 2'
891 dt.save_payload()
892 print "changed base name:", dt
893
894 dt.set_translation(translation = 'Dummy-Dokumenten-Typ fuer Unit-Test')
895 print "translated:", dt
896
897 print "deleted:", delete_document_type(document_type = dt)
898
899 return
900
902
903 print "-----------------------"
904 print "testing document import"
905 print "-----------------------"
906
907 docs = search_for_documents(patient_id=12)
908 doc = docs[0]
909 print "adding to doc:", doc
910
911 fname = sys.argv[1]
912 print "adding from file:", fname
913 part = doc.add_part(file=fname)
914 print "new part:", part
915
916 return
917
928
929
930 from Gnumed.pycommon import gmI18N
931 gmI18N.activate_locale()
932 gmI18N.install_domain()
933
934
935
936 test_get_documents()
937
938
939
940
941