Let A be your target covariance matrix:
A <- matrix(c(1,0.2,0.1,0.2,1,0.2,0.1,0.2,1), nrow = 3)
#     [,1] [,2] [,3]
#[1,]  1.0  0.2  0.1
#[2,]  0.2  1.0  0.2
#[3,]  0.1  0.2  1.0
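The Cholesky factorization used below requires A to be symmetric positive definite; a quick check (this check is my addition for illustration, not part of the original code) is:
## `chol()` needs a symmetric positive definite matrix (illustrative check)
isSymmetric(A) && all(eigen(A, symmetric = TRUE, only.values = TRUE)$values > 0)
#[1] TRUE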
Here is a working function for drawing N samples of the largest eigenvalue. It is much more efficient than using MASS::mvrnorm, as the matrix factorization of A is done only once rather than N times.
g <- function (N, n, A) {
  ## get upper triangular Cholesky factor of covariance `A`
  R <- chol.default(A)
  ## a function to generate `n` samples from `N(0, A)`
  ## and get the largest eigenvalue
  f <- function (n, R) {
    Xstd <- matrix(rnorm(n * dim(R)[1L]), n)  ## `n` standard normal samples
    X <- Xstd %*% R  ## transform to have covariance `A`
    S <- crossprod(X)  ## `X'X`
    max(eigen(S, symmetric = TRUE)$values)  ## symmetric eigen decomposition
  }
  ## replicate `N` times for `N` samples of the largest eigenvalue
  replicate(N, f(n, R))
}
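As a quick sanity check on the transformation step (an illustrative addition, not part of the original answer): since Xstd has identity covariance, Xstd %*% R has covariance R'R = A, so the empirical covariance of a large sample should be close to A.
## sanity check (illustrative): empirical covariance of `Xstd %*% R`
## should approximate `A` when the number of rows is large
R <- chol.default(A)
Z <- matrix(rnorm(1e5 * nrow(A)), 1e5) %*% R
round(cov(Z), 2)  ## approximately reproduces `A`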
## try `N = 1000`, `n = 10`, as in your original code
set.seed(0); x <- g(1000, 10, A)
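For comparison, here is a minimal sketch of the slower alternative mentioned above (the name g_mvrnorm is mine, for illustration only): calling MASS::mvrnorm inside the loop redoes the factorization of A (it uses an eigendecomposition internally) on every one of the N draws, which is exactly what g avoids.
g_mvrnorm <- function (N, n, A) {
  ## baseline sketch (illustrative): `MASS::mvrnorm` refactorizes `A`
  ## on each of the `N` replications
  replicate(N, {
    X <- MASS::mvrnorm(n, mu = rep(0, nrow(A)), Sigma = A)
    max(eigen(crossprod(X), symmetric = TRUE)$values)
  })
}
## e.g. compare: system.time(g(10000, 10, A)); system.time(g_mvrnorm(10000, 10, A))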
Note that I don't ask g to do the summary and the plot: as long as we keep the samples, we can do that at any time (an example follows the plot below).
d <- density.default(x)  ## density estimation
h <- hist.default(x, plot = FALSE)  ## histogram
graphics:::plot.histogram(h, freq = FALSE, ylim = c(0, max(h$density, d$y)),
                          main = "histogram of largest eigenvalue")
lines(d$x, d$y, col = 2)
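Since the raw samples are kept in x, other summaries can be computed later as well; for example (these particular calls are just an illustration):
summary(x)                     ## five-number summary plus the mean
quantile(x, c(0.025, 0.975))   ## an empirical 95% interval for the largest eigenvalue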
