projDir <- "/mnt/scratcha/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020"
outDirBit <- "AnaWiSce/Attempt1"
nbPcToComp <- 50
Source: the “Integrating Datasets” chapter of the OSCA book and the fastMNN manual.
Large single-cell RNA sequencing (scRNA-seq) projects usually need to generate data across multiple batches due to logistical constraints. However, the processing of different batches is often subject to uncontrollable differences, e.g., changes in operator, differences in reagent quality. This results in systematic differences in the observed expression in cells from different batches, which we refer to as “batch effects”. Batch effects are problematic as they can be major drivers of heterogeneity in the data, masking the relevant biological differences and complicating interpretation of the results.
Computational correction of these effects is critical for eliminating batch-to-batch variation, allowing data across multiple batches to be combined for common downstream analysis. However, existing methods based on linear models (Ritchie et al. 2015; Leek et al. 2012) assume that the composition of cell populations is either known or the same across batches. To overcome these limitations, bespoke methods have been developed for batch correction of single-cell data (Haghverdi et al. 2018; Butler et al. 2018; Lin et al. 2019) that do not require a priori knowledge about the composition of the population. This allows them to be used in workflows for exploratory analyses of scRNA-seq data where such knowledge is usually unavailable.
We will load the R object file containing the SCE object with the normalised counts.
setName <- "caron"
# Read object in:
setSuf <- ""
tmpFn <- sprintf("%s/%s/Robjects/%s_sce_nz_postDeconv%s.Rds", projDir, outDirBit, setName, setSuf)
print(tmpFn)
[1] "/mnt/scratcha/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/AnaWiSce/Attempt1/Robjects/caron_sce_nz_postDeconv.Rds"
if(!file.exists(tmpFn))
{
knitr::knit_exit()
}
sce <- readRDS(tmpFn)
sce
class: SingleCellExperiment
dim: 18372 45669
metadata(0):
assays(1): counts
rownames(18372): ENSG00000238009 ENSG00000237491 ... ENSG00000275063
ENSG00000271254
rowData names(11): ensembl_gene_id external_gene_name ... detected
gene_sparsity
colnames: NULL
colData names(19): Sample Barcode ... discard cell_sparsity
reducedDimNames(0):
altExpNames(0):
colnames(rowData(sce))[colnames(rowData(sce)) == "strand"] <- "strandNum" # rename the 'strand' column to avoid a name clash later
#head(rowData(sce))
#head(colData(sce))
#assayNames(sce)
#reducedDimNames(sce)
Read in the sample sheet:
# CaronBourque2020
cb_sampleSheetFn <- file.path(projDir, "Data/CaronBourque2020/SraRunTable.txt")
cb_sampleSheet <- read.table(cb_sampleSheetFn, header=T, sep=",")
cb_sampleSheet <- cb_sampleSheet %>% filter(!Run == "SRR9264351")
cb_sampleSheet
Add human-readable sample names:
cb_sampleSheet$Sample.Name2 <- NA_character_ # placeholder, filled in below
typeVec <- unique(cb_sampleSheet$source_name)
for (tx in typeVec)
{
tmpInd <- which(cb_sampleSheet$source_name == tx)
for (i in 1:length(tmpInd))
{
cb_sampleSheet$Sample.Name2[tmpInd[i]] <- sprintf("%s_%s", tx, i)
}
}
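The same per-group numbering can also be written more compactly with dplyr (shown only as a sketch; the loop above is what is actually used, and cb_sampleSheet2 is a hypothetical copy):
cb_sampleSheet2 <- cb_sampleSheet %>%
  group_by(source_name) %>%
  mutate(Sample.Name2 = sprintf("%s_%s", source_name, row_number())) %>%
  ungroup() %>%
  as.data.frame()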
colData(sce)$Sample.Name2 <- colData(sce) %>%
data.frame() %>%
left_join( cb_sampleSheet, by="Sample.Name") %>%
pull(Sample.Name2)
splVec <- cb_sampleSheet %>%
#filter(source_name == "ETV6-RUNX1") %>%
pull(Sample.Name2)
splVec
[1] "ETV6-RUNX1_1" "ETV6-RUNX1_2" "ETV6-RUNX1_3" "ETV6-RUNX1_4" "HHD_1"
[6] "HHD_2" "PRE-T_1" "PRE-T_2" "PBMMC_1" "PBMMC_2"
[11] "PBMMC_3"
Subset cells (1000 per sample for a faster analysis):
sceOrig <- sce
nbCells <- 1000
setSuf <- "_1kCps" # "_1kCellPerSpl"
all.sce <- list()
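# Note: sample_n() below draws a random subset of cells from each sample;
# set a seed beforehand (e.g. with set.seed()) if the exact subset must be reproducible.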
for(spx in splVec)
{
vec.bc <- colData(sce) %>%
data.frame() %>%
filter(Sample.Name2 == spx) %>%
sample_n(nbCells) %>%
pull(Barcode)
tmpInd <- which(colData(sce)$Barcode %in% vec.bc)
all.sce[[spx]] <- sce[,tmpInd]
}
# show size of sets:
lapply(all.sce, dim)
$`ETV6-RUNX1_1`
[1] 18372 1000
$`ETV6-RUNX1_2`
[1] 18372 1000
$`ETV6-RUNX1_3`
[1] 18372 1000
$`ETV6-RUNX1_4`
[1] 18372 1000
$HHD_1
[1] 18372 1000
$HHD_2
[1] 18372 1000
$`PRE-T_1`
[1] 18372 1000
$`PRE-T_2`
[1] 18372 1000
$PBMMC_1
[1] 18372 1000
$PBMMC_2
[1] 18372 1000
$PBMMC_3
[1] 18372 1000
# sizeFactors(all.sce[[1]])
We will analyse each sample separately, performing normalisation, variance modelling, dimensionality reduction and clustering:
#--- normalization ---#
library(scater) # provides logNormCounts()
all.sce <- lapply(all.sce, logNormCounts)
#--- variance-modelling ---#
library(scran)
all.dec <- lapply(all.sce, modelGeneVar)
all.hvgs <- lapply(all.dec, getTopHVGs, prop=0.1)
#--- dimensionality-reduction ---#
library(BiocSingular)
set.seed(10000)
all.sce <- mapply(FUN=runPCA, x=all.sce, subset_row=all.hvgs,
MoreArgs=list(ncomponents=25, BSPARAM=RandomParam()),
SIMPLIFY=FALSE)
# TSNE
#set.seed(100000)
#all.sce <- lapply(all.sce, runTSNE, dimred="PCA")
# UMAP
#set.seed(1000000)
#all.sce <- lapply(all.sce, runUMAP, dimred="PCA")
#--- clustering ---#
for (n in names(all.sce)) {
g <- buildSNNGraph(all.sce[[n]], k=10, use.dimred='PCA')
clust <- igraph::cluster_walktrap(g)$membership
colLabels(all.sce[[n]]) <- factor(clust)
}
To prepare for the batch correction, we first subset all batches to the common “universe” of features. In this case, this is straightforward as all batches use the same Ensembl gene annotation.
allNames <- unlist(lapply(all.sce, function(x){rownames(x)}))
allNamesNb <- table(allNames)
universe <- names(allNamesNb)[allNamesNb==length(splVec)]
length(universe)
[1] 18372
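The same universe can be obtained more directly by intersecting the row names across batches (an equivalent sketch; universe2 is only used for this check):
universe2 <- Reduce(intersect, lapply(all.sce, rownames))
identical(sort(universe), sort(universe2)) # expected to be TRUE here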
# Subsetting the SingleCellExperiment object.
uni.sce <- lapply(all.sce, function(x){x[universe,]})
# Also subsetting the variance modelling results, for convenience.
uni.dec <- lapply(all.dec, function(x){x[universe,]})
We rescale each batch to adjust for differences in sequencing depth between batches. The multiBatchNorm() function recomputes log-normalized expression values after adjusting the size factors for systematic differences in coverage between SingleCellExperiment objects. (Size factors only remove biases between cells within a single batch.) This improves the quality of the correction by removing one aspect of the technical differences between batches.
library(batchelor)
rescaled.mbn <- multiBatchNorm(uni.sce, batch = "Sample.Name2")
We perform feature selection by averaging the variance components across all batches with the combineVar() function. We compute the average as it is responsive to batch-specific HVGs while still preserving the within-batch ranking of genes. This allows us to use the same strategies described in Section 8.3 to select genes of interest. In contrast, approaches based on taking the intersection or union of HVGs across batches become increasingly conservative or liberal, respectively, with an increasing number of batches.
library(scran)
combined.dec <- combineVar(uni.dec[[1]], uni.dec[[2]], uni.dec[[3]], uni.dec[[4]],
uni.dec[[5]], uni.dec[[6]],
uni.dec[[7]], uni.dec[[8]],
uni.dec[[9]], uni.dec[[10]], uni.dec[[11]]
)
chosen.hvgs <- combined.dec$bio > 0
sum(chosen.hvgs)
[1] 12317
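The explicit enumeration of list elements above can be avoided by passing the whole list through do.call() (a sketch; unname() prevents the list names from being matched against argument names):
combined.dec2 <- do.call(combineVar, unname(uni.dec))
summary(combined.dec2$bio > 0)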
When integrating datasets of variable composition, it is generally safer to err on the side of including more genes than are used in a single dataset analysis, to ensure that markers are retained for any dataset-specific subpopulations that might be present. For a top X selection, this means using a larger X (say, ~5000), or in this case, we simply take all genes above the trend. That said, many of the signal-to-noise considerations described in Section 8.3 still apply here, so some experimentation may be necessary for best results.
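For completeness, the “top X” selection mentioned above could look like the sketch below (X = 5000 is an illustrative value only, not one used later in this analysis):
top.hvgs <- getTopHVGs(combined.dec, n=5000)
length(top.hvgs)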
Alternatively, a more forceful approach to feature selection can be used based on marker genes from within-batch comparisons.
Before we actually perform any correction, it is worth examining whether there is any batch effect in this dataset. We combine the SingleCellExperiments and perform a PCA on the log-expression values for all genes with positive (average) biological components.
# Synchronizing the metadata for cbind()ing.
#identical(rowData(rescaled.mbn[[1]]), rowData(rescaled.mbn[[2]]))
#identical(rowData(rescaled.mbn[[1]]), rowData(rescaled.mbn[[3]]))
#identical(rowData(rescaled.mbn[[1]]), rowData(rescaled.mbn[[4]]))
rescaled2 <- lapply(rescaled.mbn, function(x){x$batch <- x$Sample.Name2; x})
rescaled.mbn <- rescaled2
rm(rescaled2)
uncorrected <- do.call(cbind, rescaled.mbn)
# Using RandomParam() as it is more efficient for file-backed matrices.
library(scater)
set.seed(0010101010)
uncorrected <- runPCA(uncorrected, subset_row=chosen.hvgs,
BSPARAM=BiocSingular::RandomParam())
We use graph-based clustering on the components to obtain a summary of the population structure. As each sample group is represented by at least two replicates, each cluster should ideally consist of cells from several batches. This is the case for some but not all clusters: some clusters comprise cells from a single sample, which may indicate that cells of the same type are artificially separated due to technical differences between batches. They may also be cancer cell populations private to individual samples.
library(scran)
snn.gr <- buildSNNGraph(uncorrected, use.dimred="PCA")
clusters <- igraph::cluster_walktrap(snn.gr)$membership
tab <- table(Cluster=clusters, Batch=uncorrected$batch)
tab
Batch
Cluster ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 HHD_2 PBMMC_1
1 0 0 0 0 0 0 0
2 0 0 0 0 828 0 0
3 1 1 0 1 2 0 98
4 0 0 0 0 0 0 185
5 0 0 0 0 0 0 0
6 2 0 2 1 0 0 179
7 0 0 1 0 1 0 100
8 1 0 24 31 0 2 16
9 0 0 0 688 0 0 0
10 4 941 5 2 0 0 2
11 122 6 0 0 0 0 0
12 1 5 21 104 1 1 3
13 4 19 334 52 59 18 99
14 4 0 16 52 0 2 0
15 0 0 0 0 0 211 0
16 0 0 1 12 13 1 1
17 0 0 0 0 0 746 0
18 0 0 2 1 0 0 11
19 0 2 459 1 0 0 0
20 0 0 0 0 0 0 0
21 4 2 18 12 39 9 42
22 854 2 1 0 0 0 0
23 2 0 0 0 1 0 2
24 1 19 103 15 54 7 30
25 0 0 8 21 0 0 0
26 0 0 0 0 0 0 11
27 0 0 0 5 0 0 0
28 0 0 0 0 0 0 0
29 0 0 0 0 0 0 215
30 0 3 5 0 2 3 6
31 0 0 0 2 0 0 0
32 0 0 0 0 0 0 0
Batch
Cluster PBMMC_2 PBMMC_3 PRE-T_1 PRE-T_2
1 0 2 1 661
2 0 1 0 0
3 38 52 1 16
4 33 170 1 1
5 0 0 682 0
6 35 106 0 0
7 33 165 1 67
8 76 15 0 19
9 0 0 0 0
10 0 0 0 0
11 0 0 0 0
12 95 3 1 2
13 7 231 7 98
14 124 7 1 7
15 0 0 0 0
16 1 7 1 2
17 0 0 0 0
18 49 9 0 7
19 1 0 0 0
20 0 0 153 1
21 28 83 7 63
22 0 0 0 0
23 1 0 134 10
24 0 92 8 37
25 4 1 0 1
26 11 44 1 0
27 87 0 0 0
28 61 0 0 0
29 0 4 0 2
30 7 6 1 6
31 255 2 0 0
32 54 0 0 0
We can also visualize the uncorrected coordinates using a t-SNE plot (Figure 13.1). The strong separation between cells from different batches is consistent with the clustering results.
set.seed(1111001)
uncorrected <- runTSNE(uncorrected, dimred="PCA")
plotTSNE(uncorrected, colour_by="batch")
set.seed(1111001)
uncorrected <- runUMAP(uncorrected, dimred="PCA")
plotUMAP(uncorrected, colour_by="batch")
Of course, the other explanation for batch-specific clusters is that there are cell types that are unique to each batch. The degree of intermingling of cells from different batches is not an effective diagnostic when the batches involved might actually contain unique cell subpopulations. If a cluster only contains cells from a single batch, one can always debate whether that is caused by a failure of the correction method or if there is truly a batch-specific subpopulation. For example, do batch-specific metabolic or differentiation states represent distinct subpopulations? Or should they be merged together? We will not attempt to answer this here, only noting that each batch correction algorithm will make different (and possibly inappropriate) decisions on what constitutes “shared” and “unique” populations.
Batch effects in bulk RNA sequencing studies are commonly removed with linear regression. This involves fitting a linear model to each gene’s expression profile, setting the undesirable batch term to zero and recomputing the observations sans the batch effect, yielding a set of corrected expression values for downstream analyses. Linear modelling is the basis of the removeBatchEffect() function from the limma package (Ritchie et al. 2015) as well as the ComBat() function from the sva package (Leek et al. 2012).
To use this approach in a scRNA-seq context, we assume that the composition of cell subpopulations is the same across batches. We also assume that the batch effect is additive, i.e., any batch-induced fold-change in expression is the same across different cell subpopulations for any given gene. These are strong assumptions as batches derived from different individuals will naturally exhibit variation in cell type abundances and expression. Nonetheless, they may be acceptable when dealing with batches that are technical replicates generated from the same population of cells. (In fact, when its assumptions hold, linear regression is the most statistically efficient as it uses information from all cells to compute the common batch vector.) Linear modelling can also accommodate situations where the composition is known a priori by including the cell type as a factor in the linear model, but this situation is even less common.
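For comparison, the direct limma approach described above would look roughly like the sketch below; it returns a dense matrix, can be memory-hungry for this many cells, and is not used in the rest of this analysis:
library(limma)
# Regress the batch term out of the combined log-expression values.
lin.corrected <- removeBatchEffect(logcounts(uncorrected), batch=uncorrected$batch)
dim(lin.corrected)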
We use the rescaleBatches() function from the batchelor package to remove the batch effect. This is roughly equivalent to applying a linear regression to the log-expression values per gene, with some adjustments to improve performance and efficiency. For each gene, the mean expression in each batch is scaled down until it is equal to the lowest mean across all batches. We deliberately choose to scale all expression values down as this mitigates differences in variance when batches lie at different positions on the mean-variance trend. (Specifically, the shrinkage effect of the pseudo-count is greater for smaller counts, suppressing any differences in variance across batches.) An additional feature of rescaleBatches() is that it will preserve sparsity in the input matrix for greater efficiency, whereas other methods like removeBatchEffect() will always return a dense matrix.
library(batchelor)
rescaled.rb <- rescaleBatches(rescaled.mbn)
rescaled.rb
class: SingleCellExperiment
dim: 18372 11000
metadata(0):
assays(1): corrected
rownames(18372): ENSG00000000003 ENSG00000000419 ... ENSG00000285486
ENSG00000285492
rowData names(0):
colnames: NULL
colData names(1): batch
reducedDimNames(0):
altExpNames(0):
After clustering, we observe fewer clusters, and these consist of mixtures of cells from several samples, consistent with the removal of the batch effect. This conclusion is supported by the apparent mixing of cells from different batches in the t-SNE plot below. However, at least one batch-specific cluster is still present, indicating that the correction is not entirely complete. This is attributable to violation of one of the aforementioned assumptions, which is unsurprising here as the batches are biological samples from different conditions rather than technical replicates.
set.seed(1010101010) # To ensure reproducibility of IRLBA.
rescaled.rb <- runPCA(rescaled.rb, subset_row=chosen.hvgs, exprs_values="corrected")
snn.gr <- buildSNNGraph(rescaled.rb, use.dimred="PCA")
clusters.resc <- igraph::cluster_walktrap(snn.gr)$membership
tab.resc <- table(Cluster=clusters.resc, Batch=rescaled.rb$batch)
tab.resc
Batch
Cluster ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 HHD_2 PBMMC_1
1 21 9 138 21 10 9 55
2 108 74 37 82 48 63 24
3 7 17 16 4 7 2 122
4 5 14 102 29 86 18 51
5 0 0 2 11 14 1 77
6 124 301 139 98 275 306 157
7 625 461 173 378 346 475 137
8 4 1 0 0 0 0 3
9 1 0 17 0 0 1 3
10 8 33 277 60 65 18 132
11 0 0 0 99 0 0 0
12 0 0 0 12 0 0 0
13 6 12 11 2 6 2 4
14 0 0 6 61 1 0 0
15 0 0 0 1 0 0 0
16 90 74 43 127 140 101 39
17 0 0 0 0 0 1 14
18 0 0 32 0 0 0 0
19 0 0 2 0 0 0 0
20 0 0 3 1 0 0 10
21 0 0 0 0 1 0 172
22 0 0 2 14 0 0 0
23 0 0 0 0 0 0 0
24 1 4 0 0 1 3 0
Batch
Cluster PBMMC_2 PBMMC_3 PRE-T_1 PRE-T_2
1 14 30 8 10
2 0 20 132 19
3 54 77 1 25
4 131 111 16 108
5 69 136 2 61
6 42 179 84 73
7 82 138 561 509
8 0 7 0 9
9 0 11 0 10
10 232 239 8 83
11 139 0 0 0
12 50 0 0 0
13 2 0 71 16
14 81 0 0 0
15 37 3 0 3
16 8 27 115 59
17 0 2 0 8
18 0 5 0 0
19 14 3 0 1
20 3 5 0 5
21 7 7 0 1
22 21 0 0 0
23 14 0 0 0
24 0 0 2 0
rescaled.rb <- runTSNE(rescaled.rb, dimred="PCA")
rescaled.rb$batch <- factor(rescaled.rb$batch)
plotTSNE(rescaled.rb, colour_by="batch")
Consider a cell a in batch A, and identify the cells in batch B that are nearest neighbors to a in the expression space defined by the selected features. Repeat this for a cell b in batch B, identifying its nearest neighbors in A. Mutual nearest neighbors (MNNs) are pairs of cells from different batches that belong in each other’s set of nearest neighbors. The reasoning is that MNN pairs represent cells from the same biological state prior to the application of a batch effect - see Haghverdi et al. (2018) for full theoretical details. Thus, the difference between cells in MNN pairs can be used as an estimate of the batch effect, the subtraction of which yields batch-corrected values.
Compared to linear regression, MNN correction does not assume that the population composition is the same or known beforehand. This is because it learns the shared population structure via identification of MNN pairs and uses this information to obtain an appropriate estimate of the batch effect. Instead, the key assumption of MNN-based approaches is that the batch effect is orthogonal to the biology in high-dimensional expression space. Violations reduce the effectiveness and accuracy of the correction, with the most common case arising from variations in the direction of the batch effect between clusters. Nonetheless, the assumption is usually reasonable as a random vector is very likely to be orthogonal in high-dimensional space.
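To make the definition above concrete, the MNN pair search itself can be run with batchelor’s findMutualNN() in a common low-dimensional space, here the PCA of the uncorrected data computed earlier (an illustration only; fastMNN() below performs this internally after its own multi-batch PCA, and the two batch names are simply the first two samples):
library(batchelor)
pcs <- reducedDim(uncorrected, "PCA")
b1 <- pcs[uncorrected$batch == "ETV6-RUNX1_1", ]
b2 <- pcs[uncorrected$batch == "ETV6-RUNX1_2", ]
pairs <- findMutualNN(b1, b2, k1=20)
str(pairs) # indices of paired cells: 'first' in batch 1, 'second' in batch 2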
The batchelor package provides an implementation of the MNN approach via the fastMNN() function. Unlike the MNN method originally described by Haghverdi et al. (2018), the fastMNN() function performs PCA to reduce the dimensions beforehand and speed up the downstream neighbor detection steps.
We apply it to our batches to remove the batch effect across the highly variable genes in chosen.hvgs. To reduce computational work and technical noise, all cells in all batches are projected into the low-dimensional space defined by the top d principal components. Identification of MNNs and calculation of correction vectors are then performed in this low-dimensional space.
# Using randomized SVD here, as this is faster than
# irlba for file-backed matrices.
set.seed(1000101001)
mnn.out <- fastMNN(rescaled.mbn, auto.merge=TRUE, d=50, k=20, subset.row=chosen.hvgs,
BSPARAM=BiocSingular::RandomParam(deferred=TRUE))
mnn.out
class: SingleCellExperiment
dim: 12317 11000
metadata(2): merge.info pca.info
assays(1): reconstructed
rownames(12317): ENSG00000000003 ENSG00000000457 ... ENSG00000285458
ENSG00000285476
rowData names(1): rotation
colnames: NULL
colData names(1): batch
reducedDimNames(1): corrected
altExpNames(0):
mnn.out$batch <- factor(mnn.out$batch)
mnn.out$type <- gsub("_[1-4]","",mnn.out$batch)
The function returns a SingleCellExperiment object containing corrected values for downstream analyses like clustering or visualization. Each column of mnn.out corresponds to a cell in one of the batches, while each row corresponds to an input gene in chosen.hvgs. The batch field in the column metadata contains a vector specifying the batch of origin of each cell.
head(mnn.out$batch)
[1] ETV6-RUNX1_1 ETV6-RUNX1_1 ETV6-RUNX1_1 ETV6-RUNX1_1 ETV6-RUNX1_1
[6] ETV6-RUNX1_1
11 Levels: ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 ... PRE-T_2
The corrected matrix in the reducedDims() contains the low-dimensional corrected coordinates for all cells, which we will use in place of the PCs in our downstream analyses.
dim(reducedDim(mnn.out, "corrected"))
[1] 11000 50
A reconstructed matrix in the assays() contains the corrected expression values for each gene in each cell, obtained by projecting the low-dimensional coordinates in corrected back into gene expression space. We do not recommend using this for anything other than visualization.
dim(assay(mnn.out, "reconstructed"))
[1] 12317 11000
print(assay(mnn.out, "reconstructed")[1:5,1:3])
<5 x 3> matrix of class LowRankMatrix and type "double":
[,1] [,2] [,3]
ENSG00000000003 -3.773369e-05 -8.300428e-05 -3.456565e-05
ENSG00000000457 2.106775e-04 3.885394e-04 5.365413e-06
ENSG00000000938 -1.976560e-04 2.491227e-04 6.767908e-04
ENSG00000000971 1.002027e-04 2.324994e-04 2.100986e-04
ENSG00000001167 7.467781e-04 -2.202970e-04 6.354969e-04
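The reconstructed values are the low-rank product of the per-gene rotation vectors and the per-cell corrected coordinates, which can be checked on a small submatrix (a sketch; the result should agree with the printout above up to numerical representation):
recon.sub <- rowData(mnn.out)$rotation[1:5, ] %*% t(reducedDim(mnn.out, "corrected")[1:3, ])
recon.sub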
The most relevant parameter for tuning fastMNN() is k, which specifies the number of nearest neighbors to consider when defining MNN pairs. This can be interpreted as the minimum anticipated frequency of any shared cell type or state in each batch. Increasing k will generally result in more aggressive merging as the algorithm is more generous in matching subpopulations across batches. It can occasionally be desirable to increase k if one clearly sees that the same cell types are not being adequately merged across batches.
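For example, a more aggressive merge could be requested by increasing k (k = 30 below is purely illustrative and mnn.k30 is not used downstream):
set.seed(1000101001)
mnn.k30 <- fastMNN(rescaled.mbn, auto.merge=TRUE, d=50, k=30, subset.row=chosen.hvgs,
                   BSPARAM=BiocSingular::RandomParam(deferred=TRUE))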
We cluster on the low-dimensional corrected coordinates to obtain a partitioning of the cells that serves as a proxy for the population structure. If the batch effect is successfully corrected, clusters corresponding to shared cell types or states should contain cells from multiple batches. We see that most clusters contain contributions from several batches after correction, though some remain dominated by a single sample, as expected given the sample-specific populations noted earlier.
library(scran)
snn.gr <- buildSNNGraph(mnn.out, use.dimred="corrected")
clusters.mnn <- igraph::cluster_walktrap(snn.gr)$membership
tab.mnn <- table(Cluster=clusters.mnn, Batch=mnn.out$batch)
tab.mnn
Batch
Cluster ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 HHD_2 PBMMC_1
1 3 20 341 56 65 20 319
2 1 1 0 1 12 0 71
3 4 2 15 11 32 7 34
4 0 0 11 57 0 1 0
5 0 0 0 0 2 0 16
6 1 5 16 46 1 0 2
7 0 2 5 0 2 3 6
8 78 568 274 110 391 464 21
9 1 18 103 20 54 7 35
10 1 0 9 22 0 0 1
11 23 1 1 0 39 13 7
12 0 0 1 0 2 0 60
13 55 41 6 47 23 21 8
14 106 68 45 141 70 75 50
15 0 0 1 1 0 0 11
16 54 48 17 42 26 69 25
17 21 4 5 6 28 16 33
18 465 194 96 330 111 140 12
19 0 1 0 0 5 1 163
20 0 0 0 1 0 0 0
21 0 0 1 12 12 1 2
22 1 0 7 15 0 1 10
23 0 0 0 0 0 0 21
24 3 0 3 31 0 2 0
25 0 6 3 3 2 0 60
26 0 0 2 0 0 0 2
27 178 12 12 10 121 156 9
28 0 0 2 1 0 0 4
29 0 0 2 0 0 0 1
30 0 0 7 8 0 1 2
31 0 0 5 7 0 0 1
32 5 9 5 5 2 2 3
33 0 0 0 0 0 0 11
34 0 0 5 17 0 0 0
Batch
Cluster PBMMC_2 PBMMC_3 PRE-T_1 PRE-T_2
1 263 251 8 109
2 20 40 2 59
3 76 66 6 53
4 78 5 0 2
5 16 15 0 5
6 58 3 0 2
7 7 6 1 6
8 12 35 90 26
9 83 92 7 37
10 24 0 1 0
11 4 6 164 160
12 66 127 1 57
13 2 10 49 4
14 9 40 38 3
15 19 8 0 6
16 4 8 91 7
17 10 11 121 41
18 9 18 5 0
19 38 167 1 1
20 0 0 332 99
21 2 14 1 3
22 26 3 0 9
23 11 17 0 7
24 59 2 1 5
25 10 32 0 0
26 0 0 11 273
27 2 2 1 4
28 17 3 0 5
29 13 4 0 1
30 26 5 0 4
31 23 4 0 3
32 2 0 69 9
33 9 6 0 0
34 2 0 0 0
We can also visualize the corrected coordinates using a t-SNE plot (Figure 13.3). The presence of visual clusters containing cells from both batches provides a comforting illusion that the correction was successful.
library(scater)
set.seed(0010101010)
mnn.out <- runTSNE(mnn.out, dimred="corrected")
plotTSNE(mnn.out, colour_by="batch")
For fastMNN(), one useful diagnostic is the proportion of variance within each batch that is lost during MNN correction. Specifically, this refers to the within-batch variance that is removed during orthogonalization with respect to the average correction vector at each merge step. This is returned via the lost.var field in the metadata of mnn.out, which contains a matrix of the variance lost in each batch (column) at each merge step (row).
metadata(mnn.out)$merge.info$lost.var
ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1
[1,] 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
[2,] 0.000000000 0.000000000 0.000000000 0.000000000 0.000000000
[3,] 0.000000000 0.000000000 0.155115756 0.000000000 0.000000000
[4,] 0.000000000 0.000000000 0.005909417 0.203539937 0.000000000
[5,] 0.000000000 0.000000000 0.005937804 0.003367620 0.096239302
[6,] 0.000000000 0.116399134 0.009803825 0.016737279 0.003499484
[7,] 0.127044959 0.004622797 0.007733567 0.013218294 0.002314790
[8,] 0.007310957 0.009827945 0.019269202 0.003779890 0.016254615
[9,] 0.006604805 0.006685228 0.015948590 0.029239246 0.004494844
[10,] 0.003413216 0.004391660 0.005873768 0.005836308 0.006564319
HHD_2 PRE-T_1 PRE-T_2 PBMMC_1 PBMMC_2 PBMMC_3
[1,] 0.000000000 0.00000 0.000000000 0.041751796 0.000000000 0.050961815
[2,] 0.000000000 0.00000 0.000000000 0.016917085 0.165545763 0.019631472
[3,] 0.000000000 0.00000 0.000000000 0.009512430 0.016987176 0.012093444
[4,] 0.000000000 0.00000 0.000000000 0.004664088 0.004804665 0.003634476
[5,] 0.000000000 0.00000 0.000000000 0.005765378 0.003832367 0.004898184
[6,] 0.000000000 0.00000 0.000000000 0.004800169 0.013825906 0.005876827
[7,] 0.000000000 0.00000 0.000000000 0.004015535 0.012593871 0.003753828
[8,] 0.000000000 0.00000 0.178268013 0.014477662 0.006114257 0.012885908
[9,] 0.143930254 0.00000 0.010217644 0.006399640 0.027330714 0.007537288
[10,] 0.005799986 0.11074 0.005461576 0.004163632 0.005704912 0.004261348
Large proportions of lost variance (>10%) suggest that correction is removing genuine biological heterogeneity. This would occur due to violations of the assumption of orthogonality between the batch effect and the biological subspace (Haghverdi et al. 2018). In this case, the proportion of lost variance is small, indicating that non-orthogonality is not a major concern.
Another useful diagnostic check is to compare the clustering within each batch to the clustering of the merged data. Accurate data integration should preserve variance within each batch as there should be nothing to remove between cells in the same batch. This check complements the previously mentioned diagnostics that only focus on the removal of differences between batches. Specifically, it protects us against cases where the correction method simply aggregates all cells together, which would achieve perfect mixing but also discard the biological heterogeneity of interest.
Ideally, we should see a many-to-1 mapping where the across-batch clustering is nested inside the within-batch clusterings. This indicates that any within-batch structure was preserved after correction while acknowledging that greater resolution is possible with more cells. In practice, more discrepancies can be expected even when the correction is perfect, due to the existence of closely related clusters that were arbitrarily separated in the within-batch clustering. As a general rule, we can be satisfied with the correction if the vast majority of entries in Figure 13.4 are zero, though this may depend on whether specific clusters of interest are gained or lost.
One heatmap is generated for each of the first two batches (ETV6-RUNX1_1 and ETV6-RUNX1_2), where each entry is coloured according to the number of cells with each pair of labels (before and after correction).
library(pheatmap)
# For the first batch (adding +10 for a smoother color transition
# from zero to non-zero counts for any given matrix entry).
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- table(paste("after", clusters.mnn[tmpInd]),
paste("before", colLabels(rescaled.mbn[[batchPlace]])))
heat1 <- pheatmap(log10(tab+10), cluster_row=FALSE, cluster_col=FALSE,
main=sprintf("%s comparison", splVec[batchPlace]), silent=TRUE)
# For the second batch.
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- table(paste("after", clusters.mnn[tmpInd]),
paste("before", colLabels(rescaled.mbn[[batchPlace]])))
heat2 <- pheatmap(log10(tab+10), cluster_row=FALSE, cluster_col=FALSE,
main=sprintf("%s comparison", splVec[batchPlace]), silent=TRUE)
gridExtra::grid.arrange(heat1[[4]], heat2[[4]])
Another evaluation approach is to compute the coassignment probabilities (Section 10.6), i.e., the probability that cells from two within-batch clusters are clustered together in the across-batch clustering. High probabilities off the diagonal in Figure 13.5 indicate that within-batch clusters are merged in the across-batch analysis. We would generally expect low off-diagonal probabilities for most pairs of clusters, though this may not be reasonably possible if the within-batch clusters were poorly separated in the first place.
Coassignment probabilities for the within-batch clusters, based on coassignment of cells in the across-batch clusters obtained after MNN correction. One heatmap is generated for each of GSM3872434 and GSM3872435, where each entry is coloured according to the coassignment probability between each pair of within-batch clusters:
# For the first batch.
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- coassignProb(colLabels(rescaled.mbn[[batchPlace]]), clusters.mnn[tmpInd])
heat1 <- pheatmap(tab, cluster_row=FALSE, cluster_col=FALSE,
col=rev(viridis::magma(100)), main=sprintf("%s probabilities", splVec[batchPlace]), silent=TRUE)
# For the second batch.
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- coassignProb(colLabels(rescaled.mbn[[batchPlace]]), clusters.mnn[tmpInd])
heat2 <- pheatmap(tab, cluster_row=FALSE, cluster_col=FALSE,
col=rev(viridis::magma(100)), main=sprintf("%s probabilities", splVec[batchPlace]), silent=TRUE)
gridExtra::grid.arrange(heat1[[4]], heat2[[4]])
Finally, we can summarize the agreement between clusterings by computing the Rand index. This provides a simple metric that we can use to assess the preservation of variation by different correction methods. Larger Rand indices (i.e., closer to 1) are more desirable, though this must be balanced against the ability of each method to actually remove the batch effect.
suppressMessages(library(fossil))
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
ri1 <- rand.index(as.integer(clusters.mnn[tmpInd]),
as.integer(colLabels(rescaled.mbn[[batchPlace]])))
ri1
[1] 0.7513013
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
ri2 <- rand.index(as.integer(clusters.mnn[tmpInd]),
as.integer(colLabels(rescaled.mbn[[batchPlace]])))
ri2
[1] 0.7077918
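The fossil package also provides an adjusted Rand index, which corrects for chance agreement and can be a useful complement (a sketch, reusing the batch 2 indices from just above):
adj.rand.index(as.integer(clusters.mnn[tmpInd]),
               as.integer(colLabels(rescaled.mbn[[batchPlace]])))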
In some situations, we will already have performed within-batch analyses to characterize salient aspects of population heterogeneity. This is not uncommon when merging datasets from different sources where each dataset has already been analyzed, annotated and interpreted separately. It is subsequently desirable for the integration procedure to retain these “known interesting” aspects of each dataset in the merged dataset. We can encourage this outcome by using the marker genes within each dataset as our selected feature set for fastMNN() and related methods. This focuses on the relevant heterogeneity and represents a semi-supervised approach that is a natural extension of the strategy described in Section 8.4.
To illustrate, we apply this strategy to the four ETV6-RUNX1 batches. We identify the top marker genes from pairwise Wilcoxon rank sum tests between every pair of clusters within each batch, analogous to the method used by SingleR (Chapter 12). In this case, we use the top 10 marker genes, but any value can be used depending on the acceptable trade-off between signal and noise (and speed). We then take the union across all comparisons in all batches and use that in place of our HVG set in fastMNN().
# Recall that groups for marker detection
# are automatically defined from 'colLabels()'.
stats1 <- pairwiseWilcox(rescaled.mbn[[1]], direction="up")
markers1 <- getTopMarkers(stats1[[1]], stats1[[2]], n=10)
stats2 <- pairwiseWilcox(rescaled.mbn[[2]], direction="up")
markers2 <- getTopMarkers(stats2[[1]], stats2[[2]], n=10)
stats3 <- pairwiseWilcox(rescaled.mbn[[3]], direction="up")
markers3 <- getTopMarkers(stats3[[1]], stats3[[2]], n=10)
stats4 <- pairwiseWilcox(rescaled.mbn[[4]], direction="up")
markers4 <- getTopMarkers(stats4[[1]], stats4[[2]], n=10)
marker.set <- unique(unlist(c(unlist(markers1), unlist(markers2), unlist(markers3), unlist(markers4))))
length(marker.set) # getting the total number of genes selected in this manner.
[1] 616
set.seed(1000110)
mnn.out2 <- fastMNN(rescaled.mbn[1:4], subset.row=marker.set,
BSPARAM=BiocSingular::RandomParam(deferred=TRUE))
A quick inspection indicates that the original within-batch structure is indeed preserved in the corrected data. This highlights the utility of a marker-based feature set for integrating datasets that have already been characterized separately in a manner that preserves existing interpretations of each dataset. We note that some within-batch clusters have merged, most likely due to the lack of robust separation in the first place, though this may also be treated as a diagnostic on the appropriateness of the integration depending on the context.
mnn.out2 <- runTSNE(mnn.out2, dimred="corrected")
batchVec <- levels(mnn.out$batch)
gridExtra::grid.arrange(
plotTSNE(mnn.out2[,mnn.out2$batch==batchVec[1]], colour_by=I(colLabels(rescaled.mbn[[1]]))),
plotTSNE(mnn.out2[,mnn.out2$batch==batchVec[2]], colour_by=I(colLabels(rescaled.mbn[[2]]))),
plotTSNE(mnn.out2[,mnn.out2$batch==batchVec[3]], colour_by=I(colLabels(rescaled.mbn[[3]]))),
plotTSNE(mnn.out2[,mnn.out2$batch==batchVec[4]], colour_by=I(colLabels(rescaled.mbn[[4]]))),
ncol=2
)
It is preferable to perform DE analyses using the uncorrected expression values with blocking on the batch, as discussed in Section 11.4. This strategy is based on the expectation that any genuine DE between clusters should still be present in a within-batch comparison where batch effects are absent. It penalizes genes that exhibit inconsistent DE across batches, thus protecting against misleading conclusions when a population in one batch is aligned to a similar-but-not-identical population in another batch. We demonstrate this approach below using a blocked t-test to detect markers in this dataset, where the presence of the same pattern across clusters within each batch (Figure 13.7) is reassuring. If integration is performed across multiple conditions, it is even more important to use the uncorrected expression values for downstream analyses - see Section 14.5.2 for a discussion.
m.out <- findMarkers(uncorrected, clusters.mnn, block=uncorrected$batch,
direction="up", lfc=1, row.data=rowData(uncorrected)[,c("ensembl_gene_id","Symbol"),drop=FALSE])
Warning in FUN(...): no within-block comparison between 20 and 10
Warning in FUN(...): no within-block comparison between 20 and 19
Warning in FUN(...): no within-block comparison between 25 and 20
Warning in FUN(...): no within-block comparison between 29 and 20
Warning in FUN(...): no within-block comparison between 33 and 20
Warning in FUN(...): no within-block comparison between 34 and 20
demo <- m.out[["1"]]
as.data.frame(demo[1:20,c("Symbol", "Top", "p.value", "FDR")])
#as.data.frame(demo[1:20,c("external_gene_name", "Top", "p.value", "FDR")])
Expression level for the top gene, ENSG00000008517:
geneEnsId <- rownames(demo)[1]
plotExpression(uncorrected, x=I(factor(clusters.mnn)),
features=geneEnsId, colour_by="batch") + facet_wrap(~colour_by)
We will first merge replicates within each sample group separately, then merge the sample groups, starting with the group expected to have the largest number of ‘cell types’.
# Using randomized SVD here, as this is faster than
# irlba for file-backed matrices.
set.seed(1000101001)
rm(mnn.out)
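# merge.order below gives positions in 'rescaled.mbn' (same order as splVec):
# 1-4 = ETV6-RUNX1, 5-6 = HHD, 7-8 = PRE-T, 9-11 = PBMMC.
# Replicates inside each inner list are merged first; the per-group results are then
# merged in the order listed (ETV6-RUNX1, PBMMC, HHD, PRE-T).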
mnn.out <- fastMNN(rescaled.mbn,
merge.order=list( list(1,2,3,4), list(9,10,11), list(5,6), list(7,8) ),
d=50, k=20, subset.row=chosen.hvgs,
BSPARAM=BiocSingular::RandomParam(deferred=TRUE))
mnn.out
class: SingleCellExperiment
dim: 12317 11000
metadata(2): merge.info pca.info
assays(1): reconstructed
rownames(12317): ENSG00000000003 ENSG00000000457 ... ENSG00000285458
ENSG00000285476
rowData names(1): rotation
colnames: NULL
colData names(1): batch
reducedDimNames(1): corrected
altExpNames(0):
mnn.out$batch <- factor(mnn.out$batch)
mnn.out$type <- gsub("_[1-4]","",mnn.out$batch)
mnn.out$type <- factor(mnn.out$type)
#class(mnn.out$batch)
#head(mnn.out$batch)
#dim(reducedDim(mnn.out, "corrected"))
#assay(mnn.out, "reconstructed")
print(dim(assay(mnn.out, "reconstructed")))
[1] 12317 11000
print(assay(mnn.out, "reconstructed")[1:5,1:3])
<5 x 3> matrix of class LowRankMatrix and type "double":
[,1] [,2] [,3]
ENSG00000000003 -2.536382e-05 -1.602522e-04 -4.321568e-05
ENSG00000000457 5.065654e-04 7.128008e-04 2.080907e-04
ENSG00000000938 -7.889729e-04 -1.404166e-03 -3.766279e-04
ENSG00000000971 2.850208e-05 9.862104e-05 1.354787e-04
ENSG00000001167 2.090608e-04 -3.784854e-04 1.733540e-05
Diagnostic table and plots:
library(scran)
snn.gr <- buildSNNGraph(mnn.out, use.dimred="corrected")
clusters.mnn <- igraph::cluster_walktrap(snn.gr)$membership
mnn.out$clusters.mnn <- sprintf("c%s", clusters.mnn)
tab.mnn <- table(Cluster=mnn.out$clusters.mnn, Batch=mnn.out$batch)
tab.mnn
Batch
Cluster ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 HHD_2 PBMMC_1
c1 1 0 10 16 0 1 15
c10 4 6 76 17 39 9 42
c11 1 3 14 54 0 1 1
c12 0 0 5 19 0 0 0
c13 1 2 12 42 1 0 2
c14 0 0 0 1 1 0 15
c15 15 0 1 3 34 20 21
c16 227 20 9 16 160 118 27
c17 4 1 2 1 6 0 59
c18 3 0 15 62 0 2 0
c19 0 0 1 0 0 0 1
c2 0 3 5 0 2 3 6
c20 0 0 3 1 0 0 11
c21 106 82 34 167 59 82 6
c22 426 309 96 323 133 229 8
c23 6 21 10 5 8 3 7
c24 1 19 104 20 54 7 40
c25 0 0 1 1 0 0 204
c26 3 16 277 49 57 18 109
c27 0 0 0 0 0 0 11
c28 0 0 1 9 11 1 3
c3 65 420 290 92 349 403 175
c4 0 0 9 14 0 1 1
c5 65 54 13 35 43 72 81
c6 8 1 2 1 18 8 60
c7 64 43 8 50 22 22 15
c8 0 0 1 1 3 0 45
c9 0 0 1 1 0 0 35
Batch
Cluster PBMMC_2 PBMMC_3 PRE-T_1 PRE-T_2
c1 43 6 0 0
c10 94 89 7 73
c11 60 3 1 13
c12 2 1 0 8
c13 56 3 0 0
c14 14 14 0 3
c15 7 11 162 45
c16 11 33 2 14
c17 1 14 40 9
c18 105 4 1 7
c19 0 0 18 301
c2 7 6 2 6
c20 30 9 0 7
c21 4 12 0 0
c22 9 18 0 1
c23 3 0 73 30
c24 86 112 8 37
c25 28 11 1 2
c26 217 219 7 90
c27 10 6 0 0
c28 6 19 1 10
c3 44 179 63 43
c4 47 9 0 0
c5 21 54 134 17
c6 13 13 465 224
c7 8 18 14 3
c8 54 113 1 49
c9 20 24 0 8
library(scater)
set.seed(0010101010)
mnn.out <- runTSNE(mnn.out, dimred="corrected")
p <- plotTSNE(mnn.out, colour_by="batch")
p + facet_wrap(~mnn.out$type)
Write mnn.out object to file
colData(mnn.out) <- cbind(colData(uncorrected),colData(mnn.out)[,c("type", "clusters.mnn")])
# Write object to file
# fastMnnWholeByList -> Fmwbl
tmpFn <- sprintf("%s/%s/Robjects/%s_sce_nz_postDeconv%s_Fmwbl.Rds", projDir, outDirBit, setName, setSuf)
saveRDS(mnn.out, tmpFn)
tmpFn <- sprintf("%s/%s/Robjects/%s_sce_nz_postDeconv%s_Fmwbl2.Rds", projDir, outDirBit, setName, setSuf)
saveRDS(list("chosen.hvgs"=chosen.hvgs, "uncorrected"=uncorrected,"rescaled.mbn"=rescaled.mbn), tmpFn)
Proportions of lost variance
metadata(mnn.out)$merge.info$lost.var
ETV6-RUNX1_1 ETV6-RUNX1_2 ETV6-RUNX1_3 ETV6-RUNX1_4 HHD_1 HHD_2
[1,] 0.000000000 0.000000000 0.000000000 0.00000000 0.00000000 0.00000000
[2,] 0.000000000 0.000000000 0.000000000 0.00000000 0.01241817 0.03496573
[3,] 0.000000000 0.000000000 0.000000000 0.00000000 0.00000000 0.00000000
[4,] 0.000000000 0.000000000 0.000000000 0.00000000 0.00000000 0.00000000
[5,] 0.022394503 0.028853804 0.000000000 0.00000000 0.00000000 0.00000000
[6,] 0.015196722 0.011555716 0.036390422 0.00000000 0.00000000 0.00000000
[7,] 0.002680773 0.002628968 0.005573193 0.05103570 0.00000000 0.00000000
[8,] 0.067038671 0.054243590 0.126698638 0.17689035 0.00000000 0.00000000
[9,] 0.019917382 0.021405755 0.033839111 0.03791951 0.09551676 0.10790694
[10,] 0.019784082 0.022209371 0.029477449 0.02976371 0.02634447 0.02664557
PRE-T_1 PRE-T_2 PBMMC_1 PBMMC_2 PBMMC_3
[1,] 0.02600436 0.01581972 0.000000000 0.000000000 0.00000000
[2,] 0.00000000 0.00000000 0.000000000 0.000000000 0.00000000
[3,] 0.00000000 0.00000000 0.045459704 0.187809533 0.00000000
[4,] 0.00000000 0.00000000 0.007615442 0.005678275 0.06605941
[5,] 0.00000000 0.00000000 0.000000000 0.000000000 0.00000000
[6,] 0.00000000 0.00000000 0.000000000 0.000000000 0.00000000
[7,] 0.00000000 0.00000000 0.000000000 0.000000000 0.00000000
[8,] 0.00000000 0.00000000 0.027822831 0.021451890 0.02269959
[9,] 0.00000000 0.00000000 0.018912623 0.044243631 0.02074165
[10,] 0.10883444 0.18154633 0.023707234 0.024626696 0.02033936
Comparison to within-batch clusters
library(pheatmap)
mnn.out$batch <- factor(mnn.out$batch) # batch was coerced to character when colData was replaced above, so re-factor it
levels(mnn.out$batch)
[1] "ETV6-RUNX1_1" "ETV6-RUNX1_2" "ETV6-RUNX1_3" "ETV6-RUNX1_4" "HHD_1"
[6] "HHD_2" "PBMMC_1" "PBMMC_2" "PBMMC_3" "PRE-T_1"
[11] "PRE-T_2"
# For the first batch (adding +10 for a smoother color transition
# from zero to non-zero counts for any given matrix entry).
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
length(paste("after", clusters.mnn[tmpInd]))
[1] 1000
rescaled.mbn[[batchPlace]]
class: SingleCellExperiment
dim: 18372 1000
metadata(0):
assays(2): counts logcounts
rownames(18372): ENSG00000000003 ENSG00000000419 ... ENSG00000285486
ENSG00000285492
rowData names(11): ensembl_gene_id external_gene_name ... detected
gene_sparsity
colnames: NULL
colData names(23): Sample Barcode ... label batch
reducedDimNames(1): PCA
altExpNames(0):
rescaled.mbn[[batchPlace]] %>% colData %>% head
DataFrame with 6 rows and 23 columns
Sample
<character>
1 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
2 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
3 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
4 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
5 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
6 /mnt/scratchb/bioinformatics/baller01/20200511_FernandesM_ME_crukBiSs2020/CaronBourque2020/grch38300/SRR9264343/SRR9264343/outs/filtered_feature_bc_matrix/
Barcode Run Sample.Name source_name block sum
<character> <character> <character> <factor> <factor> <numeric>
1 AAACCTGGTCTTCAAG-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 11706
2 AAACCTGTCCCAAGTA-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 8354
3 AAACGGGCAGTTCATG-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 7839
4 AAACGGGGTAAGCACG-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 5438
5 AAACGGGGTTCACCTC-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 9280
6 AAACGGGTCGGTTCGG-1 SRR9264343 GSM3872434 ETV6-RUNX1 ETV6-RUNX1 9949
detected percent_top_50 percent_top_100 percent_top_200 percent_top_500
<integer> <numeric> <numeric> <numeric> <numeric>
1 3079 32.3595 45.7116 56.5437 68.9219
2 2307 37.4791 53.0285 63.3948 74.5870
3 2223 34.7111 49.4706 60.6710 73.9125
4 1759 37.0541 52.2986 62.7804 75.6896
5 2764 32.7802 45.9483 56.0345 68.7608
6 3015 28.5355 41.4212 52.1560 65.4538
subsets_Mito_sum subsets_Mito_detected subsets_Mito_percent total
<numeric> <integer> <numeric> <numeric>
1 567 12 4.84367 11706
2 519 13 6.21259 8354
3 428 11 5.45988 7839
4 352 11 6.47297 5438
5 740 12 7.97414 9280
6 397 11 3.99035 9949
setName discard cell_sparsity Sample.Name2 sizeFactor label
<character> <logical> <numeric> <character> <numeric> <factor>
1 Caron FALSE 0.856837 ETV6-RUNX1_1 4.77137 1
2 Caron FALSE 0.892698 ETV6-RUNX1_1 3.40539 4
3 Caron FALSE 0.896605 ETV6-RUNX1_1 3.19464 1
4 Caron FALSE 0.918233 ETV6-RUNX1_1 2.21631 5
5 Caron FALSE 0.871442 ETV6-RUNX1_1 3.78286 1
6 Caron FALSE 0.859814 ETV6-RUNX1_1 4.05475 2
batch
<character>
1 ETV6-RUNX1_1
2 ETV6-RUNX1_1
3 ETV6-RUNX1_1
4 ETV6-RUNX1_1
5 ETV6-RUNX1_1
6 ETV6-RUNX1_1
length(paste("before", colLabels(rescaled.mbn[[batchPlace]])))
[1] 1000
save.image("dataSetIntegrationWhole.debug.Rdata")
table(paste("after", clusters.mnn[tmpInd]))
after 1 after 10 after 11 after 13 after 15 after 16 after 17 after 18
1 4 1 1 15 227 4 3
after 21 after 22 after 23 after 24 after 26 after 3 after 5 after 6
106 426 6 1 3 65 65 8
after 7
64
table(paste("before", colLabels(rescaled.mbn[[batchPlace]])))
before 1 before 2 before 3 before 4 before 5 before 6 before 7
62 126 362 253 134 15 48
tab <- table(paste("after", clusters.mnn[tmpInd]),
paste("before", colLabels(rescaled.mbn[[batchPlace]])))
heat1 <- pheatmap(log10(tab+10), cluster_row=FALSE, cluster_col=FALSE,
main=sprintf("%s comparison", splVec[batchPlace]), silent=TRUE)
# For the second batch.
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- table(paste("after", clusters.mnn[tmpInd]),
paste("before", colLabels(rescaled.mbn[[batchPlace]])))
heat2 <- pheatmap(log10(tab+10), cluster_row=FALSE, cluster_col=FALSE,
main=sprintf("%s comparison", splVec[batchPlace]), silent=TRUE)
gridExtra::grid.arrange(heat1[[4]], heat2[[4]])
Co-assignment probabilities
# For the first batch.
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- coassignProb(colLabels(rescaled.mbn[[batchPlace]]), clusters.mnn[tmpInd])
heat1 <- pheatmap(tab, cluster_row=FALSE, cluster_col=FALSE,
col=rev(viridis::magma(100)), main=sprintf("%s probabilities", splVec[batchPlace]), silent=TRUE)
# For the second batch.
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
tab <- coassignProb(colLabels(rescaled.mbn[[batchPlace]]), clusters.mnn[tmpInd])
heat2 <- pheatmap(tab, cluster_row=FALSE, cluster_col=FALSE,
col=rev(viridis::magma(100)), main=sprintf("%s probabilities", splVec[batchPlace]), silent=TRUE)
gridExtra::grid.arrange(heat1[[4]], heat2[[4]])
Rand index:
library(fossil)
batchPlace <- 1
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
ri1 <- rand.index(as.integer(clusters.mnn[tmpInd]),
as.integer(colLabels(rescaled.mbn[[batchPlace]])))
ri1
[1] 0.745986
batchPlace <- 2
tmpInd <- mnn.out$batch==levels(mnn.out$batch)[batchPlace]
ri2 <- rand.index(as.integer(clusters.mnn[tmpInd]),
as.integer(colLabels(rescaled.mbn[[batchPlace]])))
ri2
[1] 0.7172673
Cluster markers:
m.out <- findMarkers(uncorrected, clusters.mnn, block=uncorrected$batch,
direction="up", lfc=1, row.data=rowData(uncorrected)[,c("ensembl_gene_id","Symbol"),drop=FALSE])
Warning in FUN(...): no within-block comparison between 19 and 1
Warning in FUN(...): no within-block comparison between 19 and 4
Warning in FUN(...): no within-block comparison between 19 and 13
Warning in FUN(...): no within-block comparison between 21 and 19
Warning in FUN(...): no within-block comparison between 22 and 19
Warning in FUN(...): no within-block comparison between 27 and 19
demo <- m.out[["1"]]
as.data.frame(demo[1:20,c("Symbol", "Top", "p.value", "FDR")])
Expression level for the top gene, ENSG00000090013:
geneEnsId <- rownames(demo)[1]
plotExpression(uncorrected, x=I(factor(clusters.mnn)),
features=geneEnsId, colour_by="batch") + facet_wrap(~colour_by)
sessionInfo()
R version 4.0.0 (2020-04-24)
Platform: x86_64-pc-linux-gnu (64-bit)
Running under: CentOS Linux 7 (Core)
Matrix products: default
BLAS: /home/bioinformatics/software/R/R-4.0.0/lib64/R/lib/libRblas.so
LAPACK: /home/bioinformatics/software/R/R-4.0.0/lib64/R/lib/libRlapack.so
locale:
[1] LC_CTYPE=en_GB.UTF-8 LC_NUMERIC=C
[3] LC_TIME=en_GB.UTF-8 LC_COLLATE=en_GB.UTF-8
[5] LC_MONETARY=en_GB.UTF-8 LC_MESSAGES=en_GB.UTF-8
[7] LC_PAPER=en_GB.UTF-8 LC_NAME=C
[9] LC_ADDRESS=C LC_TELEPHONE=C
[11] LC_MEASUREMENT=en_GB.UTF-8 LC_IDENTIFICATION=C
attached base packages:
[1] parallel stats4 stats graphics grDevices utils datasets
[8] methods base
other attached packages:
[1] fossil_0.4.0 shapefiles_0.7
[3] foreign_0.8-79 maps_3.3.0
[5] sp_1.4-1 pheatmap_1.0.12
[7] batchelor_1.4.0 BiocSingular_1.4.0
[9] dplyr_1.0.0 scran_1.16.0
[11] scater_1.16.0 SingleCellExperiment_1.10.1
[13] SummarizedExperiment_1.18.2 DelayedArray_0.14.1
[15] matrixStats_0.56.0 Biobase_2.48.0
[17] GenomicRanges_1.40.0 GenomeInfoDb_1.24.2
[19] IRanges_2.22.2 S4Vectors_0.26.1
[21] BiocGenerics_0.34.0 ggplot2_3.3.2
loaded via a namespace (and not attached):
[1] viridis_0.5.1 edgeR_3.30.0
[3] jsonlite_1.6.1 viridisLite_0.3.0
[5] DelayedMatrixStats_1.10.0 statmod_1.4.34
[7] dqrng_0.2.1 GenomeInfoDbData_1.2.3
[9] vipor_0.4.5 yaml_2.2.1
[11] pillar_1.4.6 lattice_0.20-41
[13] glue_1.4.1 limma_3.44.1
[15] digest_0.6.25 RColorBrewer_1.1-2
[17] XVector_0.28.0 colorspace_1.4-1
[19] cowplot_1.0.0 htmltools_0.4.0
[21] Matrix_1.2-18 pkgconfig_2.0.3
[23] zlibbioc_1.34.0 purrr_0.3.4
[25] scales_1.1.1 RSpectra_0.16-0
[27] Rtsne_0.15 BiocParallel_1.22.0
[29] tibble_3.0.3 generics_0.0.2
[31] farver_2.0.3 ellipsis_0.3.1
[33] withr_2.2.0 magrittr_1.5
[35] crayon_1.3.4 evaluate_0.14
[37] beeswarm_0.2.3 tools_4.0.0
[39] lifecycle_0.2.0 stringr_1.4.0
[41] munsell_0.5.0 locfit_1.5-9.4
[43] irlba_2.3.3 compiler_4.0.0
[45] rsvd_1.0.3 rlang_0.4.7
[47] grid_4.0.0 RCurl_1.98-1.2
[49] BiocNeighbors_1.6.0 RcppAnnoy_0.0.16
[51] igraph_1.2.5 bitops_1.0-6
[53] base64enc_0.1-3 labeling_0.3
[55] rmarkdown_2.1 gtable_0.3.0
[57] codetools_0.2-16 R6_2.4.1
[59] gridExtra_2.3 knitr_1.28
[61] uwot_0.1.8 stringi_1.4.6
[63] ggbeeswarm_0.6.0 Rcpp_1.0.5
[65] vctrs_0.3.2 tidyselect_1.1.0
[67] xfun_0.13