@article {981, title = {Going open source: some lessons learned from the development of OpenRecLink.}, journal = {Cad Saude Publica}, volume = {31}, year = {2015}, month = {2015 Feb}, pages = {257-63}, abstract = {

Record linkage is the process of identifying and merging records that belong to the same entity across different databases. The health sector was one of the pioneering areas in the application of record linkage techniques. In 1998 we began developing a software package, called RecLink, that implemented probabilistic record linkage techniques. In this article we report the development of a new, open-source version of that program, now named OpenRecLink. The aim of this article is to present the main characteristics of the new version and some of the lessons learned during its development. The new version is a total rewrite of the program, based on three goals: (1) to migrate to a free and open source software (FOSS) platform; (2) to implement a multiplatform version; (3) to implement support for internationalization. We describe the tools we adopted, the development process, and some of the problems encountered.

}, keywords = {Databases, Factual, Medical Record Linkage, Software, Software Design}, issn = {1678-4464}, author = {Camargo Jr, Kenneth Rochel de and Coeli, Claudia Medina} }

@article {1044, title = {The development of an application for data privacy by applying an audit repository based on IHE ATNA.}, journal = {Stud Health Technol Inform}, volume = {198}, year = {2014}, month = {2014}, pages = {219-25}, abstract = {

It is necessary to optimize workflows and communication between the institutions involved in patients{\textquoteright} treatment in order to improve the quality and efficiency of German healthcare. To achieve this in the Metropolregion Rhein-Neckar, a personal, cross-institutional patient record (PEPA) is used. Given the immense sensitivity of the health-related information stored in the PEPA, it is imperative to comply with the data protection regulations in Germany. One important aspect is the logging of access to personal health data and of all other safety-related events. For gathering audit information, the IHE ATNA profile can be used, because it provides a flexible and standardized infrastructure. Solutions for gathering audit information based on ATNA already exist. In this article one such solution (OpenATNA) is evaluated using the evaluation method defined by Peter Baumgartner. In addition, a user interface for the privacy officer is necessary to support the examination of the audit information. We therefore describe a method for developing an application in Liferay (an open-source enterprise portal project) that supports examination of the gathered audit information.

}, keywords = {Computer Security, Confidentiality, Data Curation, Databases, Factual, Electronic Health Records, Guideline Adherence, Guidelines as Topic, Information Storage and Retrieval, Internationality, Medical Audit, Medical Record Linkage, Software, Software Design}, issn = {0926-9630}, author = {Bresser, Laura and K{\"o}hler, Steffen and Schwaab, Christoph} }

@article {871, title = {The caBIG Annotation and Image Markup project.}, journal = {J Digit Imaging}, volume = {23}, year = {2010}, month = {2010 Apr}, pages = {217-25}, abstract = {

Image annotation and markup are at the core of medical interpretation in both the clinical and the research setting. Digital medical images are managed with the DICOM standard format. While DICOM contains a large amount of metadata about whom, where, and how the image was acquired, it says little about the content or meaning of the pixel data. An image annotation is the explanatory or descriptive information about the pixel data of an image that is generated by a human or machine observer. An image markup is the set of graphical symbols placed over the image to depict an annotation. While DICOM is the standard for medical image acquisition, manipulation, transmission, storage, and display, there are no standards for image annotation and markup. Many systems expect annotations to be reported verbally, while markups are stored in graphical overlays or proprietary formats. This makes both difficult to extract and compute with. The goal of the Annotation and Image Markup (AIM) project is to develop a mechanism for modeling, capturing, and serializing image annotation and markup data that can be adopted as a standard by the medical imaging community. The AIM project produces both human- and machine-readable artifacts. This paper describes the AIM information model, schemas, software libraries, and tools so as to prepare researchers and developers for their use of AIM.

}, keywords = {Computational Biology, Computer Communication Networks, Databases, Factual, Diagnostic Imaging, Humans, Interdisciplinary Communication, Medical Records Systems, Computerized, National Cancer Institute (U.S.), National Institutes of Health (U.S.), Neoplasms, Program Evaluation, Quality of Health Care, Radiographic Image Enhancement, Radiology Information Systems, Software, Systems Integration, United States, User-Computer Interface}, issn = {1618-727X}, doi = {10.1007/s10278-009-9193-9}, author = {Channin, David S and Mongkolwat, Pattanasak and Kleper, Vladimir and Sepukar, Kastubh and Rubin, Daniel L} }

@article {872, title = {Metadata mapping and reuse in caBIG.}, journal = {BMC Bioinformatics}, volume = {10 Suppl 2}, year = {2009}, month = {2009}, pages = {S4}, abstract = {

BACKGROUND: This paper proposes that interoperability across biomedical databases can be improved by utilizing a repository of Common Data Elements (CDEs), UML model class-attributes, and simple lexical algorithms to facilitate the building of domain models. This is examined in the context of an existing system, the National Cancer Institute (NCI){\textquoteright}s cancer Biomedical Informatics Grid (caBIG). The goal is to demonstrate the deployment of open source tools that can be used to effectively map models and enable the reuse of existing information objects and CDEs in the development of new models for translational research applications. This effort is intended to help developers reuse appropriate CDEs to enable interoperability of their systems when developing within the caBIG framework or other frameworks that use metadata repositories.

RESULTS: The Dice (di-gram) and Dynamic algorithms are compared, and both show similar performance in matching UML model class-attributes to CDE class object-property pairs. With the algorithms used, the baselines for automatically finding matches are reasonable for the data models examined. This suggests that automatic mapping of UML models and CDEs is feasible within the caBIG framework and potentially within any framework that uses a metadata repository.

CONCLUSION: This work opens up the possibility of using mapping algorithms to reduce the cost and time required to map local data models to a reference data model such as those used within caBIG. This effort contributes to facilitating the development of interoperable systems within caBIG as well as within other metadata frameworks. Such efforts are critical to addressing the need for systems that can handle the enormous amounts of diverse data that can be leveraged from new biomedical methodologies.

}, keywords = {Algorithms, Computational Biology, Database Management Systems, Databases, Factual, Medical Informatics, Software, User-Computer Interface}, issn = {1471-2105}, doi = {10.1186/1471-2105-10-S2-S4}, author = {Kunz, Isaac and Lin, Ming-Chin and Frey, Lewis} }

@article {500, title = {Advances in functional and structural MR image analysis and implementation as FSL.}, journal = {NeuroImage}, volume = {23 Suppl 1}, year = {2004}, month = {2004}, pages = {S208-19}, abstract = {The techniques available for the interrogation and analysis of neuroimaging data have a large influence in determining the flexibility, sensitivity, and scope of neuroimaging experiments. The development of such methodologies has allowed investigators to address scientific questions that could not previously be answered and, as such, has become an important research area in its own right. In this paper, we present a review of the research carried out by the Analysis Group at the Oxford Centre for Functional MRI of the Brain (FMRIB). This research has focussed on the development of new methodologies for the analysis of both structural and functional magnetic resonance imaging data. The majority of the research laid out in this paper has been implemented as freely available software tools within FMRIB{\textquoteright}s Software Library (FSL).}, keywords = {Bayes Theorem, Brain, Databases, Factual, Humans, Image Processing, Computer-Assisted, Magnetic Resonance Imaging, Models, Neurological, Models, Statistical, Software}, issn = {1053-8119}, author = {Smith, Stephen M and Jenkinson, Mark and Woolrich, Mark W and Beckmann, Christian F and Behrens, Timothy E J and Johansen-Berg, Heidi and Bannister, Peter R and De Luca, Marilena and Drobnjak, Ivana and Flitney, David E and Niazy, Rami K and Saunders, James and Vickers, John and Zhang, Yongyue and De Stefano, Nicola and Brady, J Michael and Matthews, Paul M} }