Brofos, James A; Lederman, Roy R. Magnetic Manifold Hamiltonian Monte Carlo. Technical Report, 2020 (arXiv: 2010.07753). URL: http://arxiv.org/abs/2010.07753
Tags: Algorithms, Computer Science - Machine Learning, HMC, Manifolds, MCMC, Statistics - Machine Learning
Abstract: Markov chain Monte Carlo (MCMC) algorithms offer various strategies for sampling; the Hamiltonian Monte Carlo (HMC) family of samplers are MCMC algorithms which often exhibit improved mixing properties. The recently introduced magnetic HMC, a generalization of HMC motivated by the physics of particles influenced by magnetic field forces, has been demonstrated to improve the performance of HMC. In many applications, one wishes to sample from a distribution restricted to a constrained set, often manifested as an embedded manifold (for example, the surface of a sphere). We introduce magnetic manifold HMC, an HMC algorithm on embedded manifolds motivated by the physics of particles constrained to a manifold and moving under magnetic field forces. We discuss the theoretical properties of magnetic Hamiltonian dynamics on manifolds, and introduce a reversible and symplectic integrator for the HMC updates. We demonstrate that magnetic manifold HMC produces favorable sampling behaviors relative to the canonical variant of manifold-constrained HMC.
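For readers unfamiliar with the magnetic variant, the unconstrained (flat-space) dynamics underlying magnetic HMC can be written as below; this is a standard background illustration added here, not the manifold-constrained dynamics developed in the paper, which additionally enforce the constraints defining the embedded manifold.

$$ \dot{q} = p, \qquad \dot{p} = -\nabla U(q) + G\,p, $$

where $U$ is the negative log-density, $p$ is the momentum, and $G$ is a fixed antisymmetric matrix playing the role of a magnetic field; $G = 0$ recovers standard HMC.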
Katz, Ori; Lederman, Roy R; Talmon, Ronen. Spectral Flow on the Manifold of SPD Matrices for Multimodal Data Processing. Technical Report, 2020 (arXiv: 2009.08062). URL: http://arxiv.org/abs/2009.08062
Tags: Common variable, Computer Science - Machine Learning, Manifold Learning, Multi-view, multimodal, SPD Matrices, Statistics - Machine Learning
Abstract: In this paper, we consider data acquired by multimodal sensors capturing complementary aspects and features of a measured phenomenon. We focus on a scenario in which the measurements share mutual sources of variability but might also be contaminated by other measurement-specific sources such as interferences or noise. Our approach combines manifold learning, which is a class of nonlinear data-driven dimension reduction methods, with the well-known Riemannian geometry of symmetric and positive-definite (SPD) matrices. Manifold learning typically includes the spectral analysis of a kernel built from the measurements. Here, we take a different approach, utilizing the Riemannian geometry of the kernels. In particular, we study the way the spectrum of the kernels changes along geodesic paths on the manifold of SPD matrices. We show that this change enables us, in a purely unsupervised manner, to derive a compact, yet informative, description of the relations between the measurements, in terms of their underlying components. Based on this result, we present new algorithms for extracting the common latent components and for identifying common and measurement-specific components.
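As a hedged illustration of the kind of object studied in the abstract, the sketch below computes the affine-invariant geodesic between two SPD kernel matrices and tracks its spectrum along the path; the kernels, their construction, and the bandwidth are placeholder assumptions, not the paper's pipeline.

```python
import numpy as np

def gaussian_kernel(x, eps):
    # SPD affinity kernel from 1-D sensor samples (placeholder for real multimodal data).
    d2 = (x[:, None] - x[None, :]) ** 2
    return np.exp(-d2 / eps) + 1e-8 * np.eye(len(x))

def spd_power(a, p):
    # Matrix power of a symmetric positive-definite matrix via eigendecomposition.
    w, v = np.linalg.eigh(a)
    return (v * w ** p) @ v.T

def spd_geodesic(k1, k2, t):
    # Affine-invariant geodesic between SPD matrices k1 and k2, evaluated at t in [0, 1].
    k1h, k1ih = spd_power(k1, 0.5), spd_power(k1, -0.5)
    return k1h @ spd_power(k1ih @ k2 @ k1ih, t) @ k1h

rng = np.random.default_rng(0)
common = rng.standard_normal(200)               # shared source of variability
s1 = common + 0.3 * rng.standard_normal(200)    # sensor 1: common + its own noise
s2 = common + 0.3 * rng.standard_normal(200)    # sensor 2: common + different noise

K1, K2 = gaussian_kernel(s1, 1.0), gaussian_kernel(s2, 1.0)
for t in (0.0, 0.5, 1.0):
    g = spd_geodesic(K1, K2, t)
    g = 0.5 * (g + g.T)                          # symmetrize against round-off
    top = np.linalg.eigvalsh(g)[::-1][:5]
    print(f"t={t:.1f}  leading eigenvalues: {np.round(top, 3)}")
```

The geodesic interpolates between the two kernels; examining how its leading eigenvalues and eigenvectors evolve with t is the kind of "spectral flow" the abstract refers to.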
Lederman, Roy R; Andén, Joakim; Singer, Amit. Hyper-molecules: on the representation and recovery of dynamical structures for applications in flexible macro-molecules in cryo-EM. Journal Article, Inverse Problems, 36(4), 044005, 2020. DOI: 10.1088/1361-6420/ab5ede. URL: https://iopscience.iop.org/article/10.1088/1361-6420/ab5ede
Tags: cryo-EM, heterogeneity, HyperMolecules, MCMC, Variational inference
Bandeira, Afonso S; Chen, Yutong; Lederman, Roy R; Singer, Amit. Non-unique games over compact groups and orientation estimation in cryo-EM. Journal Article, Inverse Problems, 36(6), 064002, 2020. DOI: 10.1088/1361-6420/ab7d2c. URL: https://iopscience.iop.org/article/10.1088/1361-6420/ab7d2c
Tags: Algorithms, cryo-EM, Non-unique games, Representation Theory
Brofos, James A; Lederman, Roy R. Non-Canonical Hamiltonian Monte Carlo. Technical Report, 2020 (arXiv: 2008.08191). URL: http://arxiv.org/abs/2008.08191
Tags: Algorithms, Computer Science - Machine Learning, HMC, MCMC, Statistics - Machine Learning
Abstract: Hamiltonian Monte Carlo is typically based on the assumption of an underlying canonical symplectic structure. Numerical integrators designed for the canonical structure are incompatible with motion generated by non-canonical dynamics. These non-canonical dynamics, motivated by examples in physics and symplectic geometry, correspond to techniques such as preconditioning which are routinely used to improve algorithmic performance. Indeed, recently, a special case of non-canonical structure, magnetic Hamiltonian Monte Carlo, was demonstrated to provide advantageous sampling properties. We present a framework for Hamiltonian Monte Carlo using non-canonical symplectic structures. Our experimental results demonstrate sampling advantages associated to Hamiltonian Monte Carlo with non-canonical structure. To summarize our contributions: (i) we develop non-canonical HMC from foundations in symplectic geometry; (ii) we construct an HMC procedure using implicit integration that satisfies the detailed balance; (iii) we propose to accelerate the sampling using an approximate explicit methodology; (iv) we study two novel, randomly-generated non-canonical structures: magnetic momentum and the coupled magnet structure, with implicit and explicit integration.
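As background for the abstract's terminology (an illustration added here, not notation from the paper): Hamiltonian dynamics with respect to a symplectic structure can be written in the form

$$ \dot{z} = J\,\nabla H(z), $$

where $J$ is an invertible antisymmetric matrix. The canonical case is $J = \begin{pmatrix} 0 & I \\ -I & 0 \end{pmatrix}$, while magnetic HMC corresponds to the non-canonical choice $J = \begin{pmatrix} 0 & I \\ -I & G \end{pmatrix}$ with $G$ antisymmetric; the paper studies more general non-canonical structures and integrators compatible with them.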
Lederman, Roy R; Singer, Amit. A representation theory perspective on simultaneous alignment and classification. Journal Article, Applied and Computational Harmonic Analysis, 49(3), pp. 1001–1024, 2020. DOI: 10.1016/j.acha.2019.05.005. URL: http://www.sciencedirect.com/science/article/pii/S1063520319301034
Tags: Algorithms, Alignment, Classification, cryo-EM, Graph-cut, heterogeneity, Heterogeneous multireference alignment, Representation Theory, Rotation group, SDP, Synchronization
Abstract: Single particle cryo-electron microscopy (EM) is a method for determining the 3-D structure of macromolecules from many noisy 2-D projection images of individual macromolecules whose orientations and positions are random and unknown. The problem of orientation assignment for the images motivated work on multireference alignment. The recent non-unique games framework provides a representation theoretic approach to alignment over compact groups, and offers a convex relaxation with certificates of global optimality in some cases. One of the great opportunities in cryo-EM is studying heterogeneous samples, containing two or more distinct conformations of molecules. Taking advantage of this opportunity presents an algorithmic challenge: determining both the class and orientation of each particle. We generalize multireference alignment to a problem of alignment and classification, and propose to extend non-unique games to the problem of simultaneous alignment and classification with the goal of simultaneously classifying cryo-EM images and aligning them within their classes.
Lederman, Roy R; Steinerberger, S. Extreme Values of the Fiedler Vector on Trees. Technical Report, 2019 (arXiv: 1912.08327). URL: http://arxiv.org/abs/1912.08327
Tags: Computer Science - Discrete Mathematics, Graph Theory, Mathematics - Combinatorics, Mathematics - Spectral Theory
Abstract: Let $G$ be a connected tree on $n$ vertices and let $L = D - A$ denote the Laplacian matrix on $G$. The second-smallest eigenvalue $\lambda_2(G) > 0$, also known as the algebraic connectivity, as well as the associated eigenvector $\phi_2$ have been of substantial interest. We investigate the question of when the maxima and minima of $\phi_2$ are assumed at the endpoints of the longest path in $G$. Our results also apply to more general graphs that `behave globally' like a tree but can exhibit more complicated local structure. The crucial new ingredient is a reproducing formula for the eigenvector $\phi_k$.
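A minimal numerical illustration of the objects in the abstract (added here, not code from the paper): compute the Fiedler vector of a small tree and check where its extreme values are attained.

```python
import numpy as np

# Adjacency matrix of a small tree: a path 0-1-2-3-4 with an extra leaf 5 attached to vertex 2.
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (2, 5)]
n = 6
A = np.zeros((n, n))
for i, j in edges:
    A[i, j] = A[j, i] = 1.0

L = np.diag(A.sum(axis=1)) - A          # graph Laplacian L = D - A
eigvals, eigvecs = np.linalg.eigh(L)
lambda2 = eigvals[1]                    # algebraic connectivity
phi2 = eigvecs[:, 1]                    # Fiedler vector

print("algebraic connectivity:", round(lambda2, 4))
print("Fiedler vector:", np.round(phi2, 3))
print("max at vertex", int(np.argmax(phi2)), ", min at vertex", int(np.argmin(phi2)))
# For this tree the longest path runs between vertices 0 and 4; the extrema of phi2
# land at its endpoints, the behavior whose general validity the paper investigates.
```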
Brofos, James A; Shu, Rui; Lederman, Roy R. A Bias-Variance Decomposition for Bayesian Deep Learning. In Proceedings, 2019, 14 pp.
Tags: Bayesian Deep Learning, Bayesian Inference, Deep Learning
Abstract: We exhibit a decomposition of the Kullback-Leibler divergence into terms corresponding to bias, variance, and irreducible error. Our particular focus in this work is Bayesian deep learning and in this domain we illustrate the application of this decomposition to adversarial example identification, to image segmentation, and to malware detection. We empirically demonstrate qualitative similarities between the variance decomposition and mutual information.
Shnitzer, Tal; Lederman, Roy R; Liu, Gi-Ren; Talmon, Ronen; Wu, Hau-Tieng. Diffusion operators for multimodal data analysis. Book Chapter, Handbook of Numerical Analysis, vol. 20, pp. 1–39, Elsevier, 2019. DOI: 10.1016/bs.hna.2019.07.008. URL: https://linkinghub.elsevier.com/retrieve/pii/S1570865919300213
Tags: Alternating Diffusion, BookChapter, Common variable, diffusion maps, Manifold Learning, Multi-view, multimodal, Multimodal data, Sensor fusion, Shape differences
Lederman, Roy R; Talmon, Ronen. Learning the geometry of common latent variables using alternating-diffusion. Journal Article, Applied and Computational Harmonic Analysis, 44(3), pp. 509–536, 2018. DOI: 10.1016/j.acha.2015.09.002. URL: http://www.sciencedirect.com/science/article/pii/S1063520315001190
Tags: Algorithms, Alternating Diffusion, Common variable, diffusion maps, Multi-view, multimodal, Multimodal analysis
Abstract: One of the challenges in data analysis is to distinguish between different sources of variability manifested in data. In this paper, we consider the case of multiple sensors measuring the same physical phenomenon, such that the properties of the physical phenomenon are manifested as a hidden common source of variability (which we would like to extract), while each sensor has its own sensor-specific effects (hidden variables which we would like to suppress); the relations between the measurements and the hidden variables are unknown. We present a data-driven method based on alternating products of diffusion operators and show that it extracts the common source of variability. Moreover, we show that it extracts the common source of variability in a multi-sensor experiment as if it were a standard manifold learning algorithm used to analyze a simple single-sensor experiment, in which the common source of variability is the only source of variability.
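A hedged sketch of the alternating-diffusion construction described in the abstract (the kernel choice, bandwidths, and normalization are simplified assumptions): build a diffusion operator from each sensor and examine the spectrum of their product.

```python
import numpy as np

def diffusion_operator(x, eps):
    # Row-stochastic diffusion operator built from a Gaussian affinity kernel.
    d2 = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)
    k = np.exp(-d2 / eps)
    return k / k.sum(axis=1, keepdims=True)

rng = np.random.default_rng(1)
n = 400
common = rng.uniform(0, 2 * np.pi, n)    # hidden common variable (an angle)
nuis1 = rng.uniform(0, 2 * np.pi, n)     # sensor-1-specific hidden variable
nuis2 = rng.uniform(0, 2 * np.pi, n)     # sensor-2-specific hidden variable

# Each sensor observes the common variable entangled with its own nuisance variable.
s1 = np.stack([np.cos(common), np.sin(common), np.cos(nuis1), np.sin(nuis1)], axis=1)
s2 = np.stack([np.cos(common), np.sin(common), np.cos(nuis2), np.sin(nuis2)], axis=1)

A1 = diffusion_operator(s1, 0.5)
A2 = diffusion_operator(s2, 0.5)
A = A2 @ A1                               # alternating-diffusion operator

w, v = np.linalg.eig(A)
order = np.argsort(-np.abs(w))
embedding = np.real(v[:, order[1:3]])     # leading nontrivial eigenvectors
print("top |eigenvalues|:", np.round(np.abs(w[order[:4]]), 3))
# In this construction the embedding coordinates are expected to organize the points
# by the common angle only, suppressing the sensor-specific nuisance variables.
```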
Shaham, Uri; Lederman, Roy R. Learning by coincidence: Siamese networks and common variable learning. Journal Article, Pattern Recognition, 74, pp. 52–63, 2018. DOI: 10.1016/j.patcog.2017.09.015. URL: https://linkinghub.elsevier.com/retrieve/pii/S0031320317303588
Tags: Common variable, Deep Learning, Multi-view, multimodal, Siamese networks
Aldroubi, Akram; Huang, Longxiu; Krishtal, Ilya; Ledeczi, Akos; Lederman, Roy R; Volgyesi, Peter. Dynamical sampling with additive random noise. Technical Report, 2018 (arXiv: 1807.10866). URL: http://arxiv.org/abs/1807.10866
Tags: Mathematics - Numerical Analysis
Abstract: Dynamical sampling deals with signals that evolve in time under the action of a linear operator. The purpose of the present paper is to analyze the performance of the basic dynamical sampling algorithms in the finite dimensional case and study the impact of additive noise. The algorithms are implemented and tested on synthetic and real data sets, and denoising techniques are integrated to mitigate the effect of the noise. We also develop theoretical and numerical results that validate the algorithm for recovering the driving operators, which are defined via a real symmetric convolution.
Boumal, N; Bendory, T; Lederman, Roy R; Singer, A. Heterogeneous multireference alignment: A single pass approach. In Proceedings, 2018 52nd Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, 2018. DOI: 10.1109/CISS.2018.8362313
Tags: bispectrum, concave programming, cryo-EM, cyclic shifts, Discrete Fourier transforms, estimation theory, expectation-maximization, Gaussian mixture models, heterogeneity, heterogeneous MRA, Heterogeneous multireference alignment, Multireference alignment, Noise measurement, non-convex optimization, nonconvex optimization problem, Optimization, Reliability, signal estimation, signal processing, Signal resolution, Signal to noise ratio, single pass approach, Standards
Abstract: Multireference alignment (MRA) is the problem of estimating a signal from many noisy and cyclically shifted copies of itself. In this paper, we consider an extension called heterogeneous MRA, where K signals must be estimated, and each observation comes from one of those signals, unknown to us. This is a simplified model for the heterogeneity problem notably arising in cryo-electron microscopy. We propose an algorithm which estimates the K signals without estimating either the shifts or the classes of the observations. It requires only one pass over the data and is based on low-order moments that are invariant under cyclic shifts. Given sufficiently many measurements, one can estimate these invariant features averaged over the K signals. We then design a smooth, non-convex optimization problem to compute a set of signals which are consistent with the estimated averaged features. We find that, in many cases, the proposed approach estimates the set of signals accurately despite non-convexity, and conjecture the number of signals K that can be resolved as a function of the signal length L is on the order of √L.
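As a hedged illustration of the shift-invariant low-order moments mentioned in the abstract (a simplified construction added here, ignoring the noise-bias corrections used in practice): the mean, the power spectrum, and the bispectrum of a 1-D signal are unchanged by cyclic shifts.

```python
import numpy as np

def invariant_features(x):
    # Shift-invariant features of a 1-D signal: mean, power spectrum, bispectrum.
    f = np.fft.fft(x)
    mean = f[0].real / len(x)
    power = np.abs(f) ** 2
    k = np.arange(len(x))
    bispectrum = f[:, None] * f[None, :] * np.conj(f[(k[:, None] + k[None, :]) % len(x)])
    return mean, power, bispectrum

rng = np.random.default_rng(0)
x = rng.standard_normal(32)
m0, p0, b0 = invariant_features(x)
m1, p1, b1 = invariant_features(np.roll(x, 7))   # cyclically shifted copy

print(np.allclose(m0, m1), np.allclose(p0, p1), np.allclose(b0, b1))  # True True True
```

The shift only multiplies each Fourier coefficient by a phase, and those phases cancel in each of the three features, which is why averages of these features over many shifted observations remain informative about the underlying signals.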
Lederman, Roy R. Numerical Algorithms for the Computation of Generalized Prolate Spheroidal Functions. Technical Report, 2017. URL: https://arxiv.org/abs/1710.02874v1
Tags: Algorithms, cryo-EM, Fourier Transform, Numerical Analysis, Prolate, Slepian, Software
Abstract: Generalized Prolate Spheroidal Functions (GPSF) are the eigenfunctions of the truncated Fourier transform, restricted to D-dimensional balls in the spatial domain and frequency domain. Despite their useful properties in many applications, GPSFs are often replaced by crude approximations. The purpose of this paper is to review the elements of computing GPSFs and associated eigenvalues. This paper is accompanied by open-source code.
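A hedged 1-D illustration of the underlying eigenproblem (the paper treats the D-dimensional generalized case with dedicated algorithms; this brute-force discretization is only meant to show what "eigenfunctions of the truncated Fourier transform" means): prolate spheroidal wave functions arise as eigenfunctions of the sinc kernel on [-1, 1], which shares its eigenfunctions with the truncated Fourier transform.

```python
import numpy as np

c = 20.0                                       # bandlimit parameter
n = 400                                        # quadrature nodes on [-1, 1]
x = -1 + (np.arange(n) + 0.5) * (2.0 / n)      # midpoint-rule nodes
w = 2.0 / n                                    # uniform quadrature weight

# Kernel of Q_c f(x) = \int_{-1}^{1} sin(c(x - y)) / (pi (x - y)) f(y) dy.
diff = x[:, None] - x[None, :]
K = np.where(np.abs(diff) < 1e-12, c / np.pi, np.sin(c * diff) / (np.pi * diff))

eigvals, eigvecs = np.linalg.eigh(K * w)       # symmetric discretization of Q_c
order = np.argsort(-eigvals)
print("leading eigenvalues:", np.round(eigvals[order[:8]], 6))
# The eigenvalues stay close to 1 for roughly 2c/pi indices and then fall off sharply;
# the corresponding eigenvectors are discrete approximations of the prolate functions.
```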
Lederman, Roy R; Singer, Amit. Continuously heterogeneous hyper-objects in cryo-EM and 3-D movies of many temporal dimensions. Technical Report, 2017 (arXiv: 1704.02899). URL: http://arxiv.org/abs/1704.02899
Tags: Computer Science - Computer Vision and Pattern Recognition, cryo-EM, heterogeneity, HyperMolecules
Abstract: Single particle cryo-electron microscopy (EM) is an increasingly popular method for determining the 3-D structure of macromolecules from noisy 2-D images of single macromolecules whose orientations and positions are random and unknown. One of the great opportunities in cryo-EM is to recover the structure of macromolecules in heterogeneous samples, where multiple types or multiple conformations are mixed together. Indeed, in recent years, many tools have been introduced for the analysis of multiple discrete classes of molecules mixed together in a cryo-EM experiment. However, many interesting structures have a continuum of conformations which do not fit discrete models nicely; the analysis of such continuously heterogeneous models has remained a more elusive goal. In this manuscript, we propose to represent heterogeneous molecules and similar structures as higher dimensional objects. We generalize the basic operations used in many existing reconstruction algorithms, making our approach generic in the sense that, in principle, existing algorithms can be adapted to reconstruct those higher dimensional objects. As proof of concept, we present a prototype of a new algorithm which we use to solve simulated reconstruction problems.
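One way to read the "higher dimensional objects" in the abstract (a schematic formulation added here for illustration, not notation taken from the paper): a continuously heterogeneous molecule can be modeled as a hyper-object $V(x, \tau)$ with spatial coordinate $x \in \mathbb{R}^3$ and conformation coordinate $\tau$, so that each particle image is approximately a tomographic projection of one "slice" of $V$ at an unknown conformation $\tau_i$ and unknown orientation $R_i$,

$$ I_i \approx h_i * P\big(V(R_i\,\cdot\,, \tau_i)\big) + \varepsilon_i, $$

where $P$ denotes projection along the viewing direction, $h_i$ the microscope point spread function, and $\varepsilon_i$ noise; reconstruction then targets the whole hyper-object $V$ rather than a discrete set of classes.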
Lederman, Roy R; Steinerberger, Stefan. Lower Bounds for Truncated Fourier and Laplace Transforms. Journal Article, Integral Equations and Operator Theory, 87(4), pp. 529–543, 2017. DOI: 10.1007/s00020-017-2364-z. URL: http://link.springer.com/10.1007/s00020-017-2364-z
Tags: Fourier Transform, Laplace Transform
Stanton, Kelly P; Jin, Jiaqi; Lederman, Roy R; Weissman, Sherman M; Kluger, Yuval. Ritornello: high fidelity control-free chromatin immunoprecipitation peak calling. Journal Article, Nucleic Acids Research, 45(21), e173, 2017. DOI: 10.1093/nar/gkx799. URL: https://academic.oup.com/nar/article/45/21/e173/4157402
Tags: DNA sequencing, Sequencing, Software
Abstract: With the advent of next generation high-throughput DNA sequencing technologies, omics experiments have become the mainstay for studying diverse biolo…
Aldroubi, Akram; Huang, L; Krishtal, I; Lederman, Roy R. Dynamical sampling with random noise. In Proceedings, 2017 International Conference on Sampling Theory and Applications (SampTA), pp. 409–412, 2017. DOI: 10.1109/SAMPTA.2017.8024372
Tags: Dynamical Sampling, evolution operator, signal reconstruction, signal recovery, signal sampling
Abstract: In this paper we consider a system of dynamical sampling, i.e. sampling a signal f that evolves in time under the action of an evolution operator A. We discuss the error in the recovery of the original signal when the samples are corrupted by additive, independent and identically distributed (i.i.d.) noise. We focus on the study of the mean squared error $E(\|\epsilon_n\|_2^2)$ between the original signal and the reconstructed signal obtained by solving a least squares problem. In the theoretical part, we give a formula for $E(\|\epsilon_n\|_2^2)$ and prove that $E(\|\epsilon_n\|_2^2)$ decreases as the number of the samples increases. In addition, we discuss several numerical experiments that verify the theoretical results.
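A hedged toy version of the recovery problem in the abstract (the evolution operator, sampling pattern, and noise level are placeholder choices added here): the signal f evolves as A^t f, is sampled at a fixed subset of coordinates at several times, and is recovered by least squares.

```python
import numpy as np

rng = np.random.default_rng(0)
n, times = 64, 8
sample_idx = np.arange(0, n, 4)                  # fixed spatial sampling locations

# Evolution operator: a circular-convolution filter (placeholder choice).
h = np.zeros(n); h[[0, 1, 2]] = [0.5, 0.3, 0.2]
A = np.array([np.roll(h, k) for k in range(n)]).T   # circulant matrix with first column h

f_true = rng.standard_normal(n)

# Collect noisy space-time samples y_t = S A^t f + noise, for t = 0 .. times-1.
rows, obs = [], []
Apow = np.eye(n)
for _ in range(times):
    rows.append(Apow[sample_idx, :])
    obs.append(Apow[sample_idx, :] @ f_true + 0.01 * rng.standard_normal(len(sample_idx)))
    Apow = A @ Apow

M, y = np.vstack(rows), np.concatenate(obs)
f_hat, *_ = np.linalg.lstsq(M, y, rcond=None)    # least-squares reconstruction of f
print("relative error:", np.linalg.norm(f_hat - f_true) / np.linalg.norm(f_true))
```

The error of this least-squares estimate under i.i.d. additive noise is exactly the quantity $E(\|\epsilon_n\|_2^2)$ analyzed in the paper.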
Lederman, Roy R; Singer, Amit. A Representation Theory Perspective on Simultaneous Alignment and Classification. Technical Report, 2016 (arXiv: 1607.03464). URL: http://arxiv.org/abs/1607.03464
Tags: Algorithms, Computer Science - Computer Vision and Pattern Recognition, cryo-EM, Mathematics - Optimization and Control, Representation Theory
Abstract: One of the difficulties in 3D reconstruction of molecules from images in single particle Cryo-Electron Microscopy (Cryo-EM), in addition to high levels of noise and unknown image orientations, is heterogeneity in samples: in many cases, the samples contain a mixture of molecules, or multiple conformations of one molecule. Many algorithms for the reconstruction of molecules from images in heterogeneous Cryo-EM experiments are based on iterative approximations of the molecules in a non-convex optimization that is prone to reaching suboptimal local minima. Other algorithms require an alignment in order to perform classification, or vice versa. The recently introduced Non-Unique Games framework provides a representation theoretic approach to studying problems of alignment over compact groups, and offers convex relaxations for alignment problems which are formulated as semidefinite programs (SDPs) with certificates of global optimality under certain circumstances. In this manuscript, we propose to extend Non-Unique Games to the problem of simultaneous alignment and classification with the goal of simultaneously classifying Cryo-EM images and aligning them within their respective classes. Our proposed approach can also be extended to the case of continuous heterogeneity.
Lederman, Roy R; Steinerberger, Stefan. Stability Estimates for Truncated Fourier and Laplace Transforms. Technical Report, 2016 (arXiv: 1605.03866). URL: https://arxiv.org/abs/1605.03866v1
Tags: Laplace Transform
Abstract: We prove sharp stability estimates for the Truncated Laplace Transform and Truncated Fourier Transform. The argument combines an approach recently introduced by Alaifari, Pierce and the second author for the truncated Hilbert transform with classical results of Bertero, Grünbaum, Landau, Pollak and Slepian. In particular, we prove there is a universal constant $c > 0$ such that for all $f \in L^2(\mathbb{R})$ with compact support in $[-1,1]$ normalized to $\|f\|_{L^2[-1,1]} = 1$,
$$ \int_{-1}^{1} |\widehat{f}(\xi)|^2 \, d\xi \gtrsim \left( c \left\| f_x \right\|_{L^2[-1,1]} \right)^{-c \left\| f_x \right\|_{L^2[-1,1]}}. $$
The inequality is sharp in the sense that there is an infinite sequence of orthonormal counterexamples if $c$ is chosen too small. The question whether and to which extent similar inequalities hold for generic families of integral operators remains open.
Lederman, Roy R; Rokhlin, V. On the Analytical and Numerical Properties of the Truncated Laplace Transform. Part II. Journal Article, SIAM Journal on Numerical Analysis, 54(2), pp. 665–687, 2016. DOI: 10.1137/15M1028583. URL: http://epubs.siam.org/doi/10.1137/15M1028583
Tags: Algorithms, Laplace Transform, Numerical Analysis
Shaham, Uri; Lederman, Roy R. Common Variable Learning and Invariant Representation Learning using Siamese Neural Networks. Technical Report, 2015. URL: https://arxiv.org/abs/1512.08806v3
Tags: Common variable, Deep Learning, Multi-view
Abstract: We consider the statistical problem of learning common source of variability in data which are synchronously captured by multiple sensors, and demonstrate that Siamese neural networks can be naturally applied to this problem. This approach is useful in particular in exploratory, data-driven applications, where neither a model nor label information is available. In recent years, many researchers have successfully applied Siamese neural networks to obtain an embedding of data which corresponds to a "semantic similarity". We present an interpretation of this "semantic similarity" as learning of equivalence classes. We discuss properties of the embedding obtained by Siamese networks and provide empirical results that demonstrate the ability of Siamese networks to learn common variability.
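A hedged minimal sketch of the kind of Siamese training objective discussed in the abstract (the architecture, loss margin, and data below are placeholder assumptions, not the paper's experimental setup): two networks map synchronously captured pairs close together and non-corresponding pairs apart.

```python
import torch
import torch.nn as nn

class Tower(nn.Module):
    # One branch of the Siamese pair; each sensor gets its own small encoder.
    def __init__(self, in_dim, out_dim=2):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(in_dim, 64), nn.ReLU(), nn.Linear(64, out_dim))

    def forward(self, x):
        return self.net(x)

def contrastive_loss(za, zb, same, margin=1.0):
    # Pull embeddings of coincident (synchronously captured) pairs together,
    # push embeddings of non-corresponding pairs at least `margin` apart.
    d = torch.norm(za - zb, dim=1)
    return torch.mean(same * d ** 2 + (1 - same) * torch.clamp(margin - d, min=0) ** 2)

f, g = Tower(in_dim=4), Tower(in_dim=4)
opt = torch.optim.Adam(list(f.parameters()) + list(g.parameters()), lr=1e-3)

xa, xb = torch.randn(256, 4), torch.randn(256, 4)   # placeholder sensor measurements
same = (torch.rand(256) < 0.5).float()              # 1 = coincident pair, 0 = mismatched

for _ in range(100):
    opt.zero_grad()
    loss = contrastive_loss(f(xa), g(xb), same)
    loss.backward()
    opt.step()
```

Under this objective the embedding can only depend on information shared by the two sensors, which is the interpretation as learning of equivalence classes discussed in the abstract.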
Lederman, Roy R; Talmon, Ronen; Wu, Hau-tieng; Lo, Yu-Lun; Coifman, Ronald R. Alternating diffusion for common manifold learning with application to sleep stage assessment. In Proceedings, 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5758–5762, 2015. DOI: 10.1109/ICASSP.2015.7179075
Tags: Alternating Diffusion, Common variable, diffusion maps, Kernel, learning (artificial intelligence), Manifolds, multimodal, multimodal respiratory signals, multimodal signal processing, Physiology, Sensitivity, Sensor phenomena and characterization, signal processing, sleep, sleep stage assessment, standard manifold learning method, time series
Abstract: In this paper, we address the problem of multimodal signal processing and present a manifold learning method to extract the common source of variability from multiple measurements. This method is based on alternating-diffusion and is particularly adapted to time series. We show that the common source of variability is extracted from multiple sensors as if it were the only source of variability, extracted by a standard manifold learning method from a single sensor, without the influence of the sensor-specific variables. In addition, we present an application to sleep stage assessment. We demonstrate that, indeed, through alternating-diffusion, the sleep information hidden inside multimodal respiratory signals can be better captured compared to single-modal methods.
Lederman, Roy R; Rokhlin, V. On the Analytical and Numerical Properties of the Truncated Laplace Transform I. Journal Article, SIAM Journal on Numerical Analysis, 53(3), pp. 1214–1235, 2015. DOI: 10.1137/140990681. URL: http://epubs.siam.org/doi/10.1137/140990681
Tags: Algorithms, Laplace Transform, Numerical Analysis
Lederman, Roy R; Talmon, Ronen. Common Manifold Learning Using Alternating-Diffusion. Technical Report, Yale CS (YALEU/DCS/TR-1497), 2014, 42 pp. URL: https://cpsc.yale.edu/sites/default/files/files/tr1497.pdf
Tags: AD, Algorithms, Alternating Diffusion, Manifold Learning
Lederman, Roy R. On the Analytical and Numerical Properties of the Truncated Laplace Transform. Technical Report, Yale CS (YALEU/DCS/TR-1490), 2014, 82 pp.
Tags: Algorithms, Laplace Transform, Numerical Analysis
Lederman, Roy R. A permutations-based algorithm for fast alignment of long paired-end reads. Technical Report, Yale CS (YALEU/DCS/TR-1474), 2013, 11 pp.
Tags: Algorithms, DNA sequencing, Fast algorithms, Randomized algorithms, Sequencing
Lederman, Roy R. A Note about the Resolution-Length Characteristics of DNA. Technical Report, Yale CS (YALEU/DCS/TR-1473), 2013, 6 pp.
Tags: Sequence Alignment, Sequencing
Lederman, Roy R. Building approximate overlap graphs for DNA assembly using random-permutations-based search. Technical Report, Yale CS (YALEU/DCS/TR-1470), 2012, 10 pp.
Tags: DNA sequencing, Sequencing
Lederman, Roy R. Homopolymer Length Filters. Technical Report, Yale CS (YALEU/DCS/TR-1465), 2012, 12 pp.
Tags: Algorithms, DNA sequencing, Sequence Alignment, Sequencing