After some work I figured out how to test 3 versions of it - tl;dr the blog post's version (v1) does indeed leak, but can be tweaked so that it doesn't (v2) and improved (v3):
common code:
template <typename Deleter>
using unique_p = std::unique_ptr<float[], Deleter>;
constexpr int length = 20;
v1: (what is recommended in the blog post)
void version1(){
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted1\n"; };
    unique_p<decltype(deleter)> d_in(new float[length],deleter);
    cudaMalloc((void **) &d_in, length * sizeof(float));
    ...
}
v2: (similar to above, but initializes d_in with nullptr)
void version2(){
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted2\n"; };
    unique_p<decltype(deleter)> d_in(nullptr,deleter);
    cudaMalloc((void **) &d_in, length * sizeof(float));
    ...
} 
v3: (d_in "adopts" pointer initialized with cudaMalloc)
void version3(){
    auto  myCudaMalloc = [](size_t mySize) { void* ptr; cudaMalloc((void**)&ptr, mySize); return ptr; };
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted3\n"; };
    unique_p<decltype(deleter)> d_in((float*)myCudaMalloc(length*sizeof(float)),deleter);
    ...
}
All 3 create proper device pointers. However, version 1 definitely leaks host memory: the `new float[length]` host array is never freed, because cudaMalloc overwrites the unique_ptr's stored pointer (tested using valgrind with the CUDA warnings suppressed — see "Valgrind and CUDA: Are reported leaks real?"). Neither v2 nor v3 leaks host memory. cuda-memcheck also confirmed that there were no device-side memory leaks for any of the versions.
Between versions 2 and 3, I prefer version 3: it makes it clearer that the unique_ptr owns the pointer, and it follows the familiar idiom of calling new or malloc directly inside the unique_ptr constructor. You also only have to define the allocating function/lambda once and can then reuse it everywhere, so it ends up being fewer lines of code overall.
========================
Full test code (compiled with nvcc -std=c++14):
#include <cuda_runtime.h>
#include <memory>
#include <iostream>
// unique_ptr specialisation for float arrays with a caller-supplied deleter;
// each version below pairs it with a deleter that calls cudaFree instead of
// the default delete[].
template <typename Deleter>
using unique_p = std::unique_ptr<float[], Deleter>;
// Debug kernel: prints the first `num` elements of d_in, tab-separated,
// then a newline. Meant for a single-thread launch (<<<1,1>>>); every
// participating thread would print the entire array.
__global__ void printArray(float * d_in, int num){
    int idx = 0;
    while (idx < num) {
        printf("%f\t", d_in[idx]);
        ++idx;
    }
    printf("\n");
}
// Functor alternative to the per-version deleter lambdas: frees the managed
// pointer with cudaFree and logs. Currently only referenced from the
// commented-out line inside version3().
struct myDeleter{
    void operator()(float* ptr){ cudaFree(ptr); std::cout<<"\nDeleted\n"; } 
};
constexpr int length = 20;  // element count of every host/device test array
// v1 (the blog post's version) — deliberately kept as-is to demonstrate the
// leak discussed above: the unique_ptr is constructed around a *host*
// allocation, then cudaMalloc overwrites the managed pointer in place.
// NOTE(review): `(void **) &d_in` takes the address of the unique_ptr object
// itself and relies on the raw pointer being its first (and only) data
// member — not guaranteed by the standard, though it works on common
// implementations. The `new float[length]` host array is leaked because the
// stored pointer is clobbered before the deleter ever sees it.
void version1(){
    // At destruction the deleter frees whatever pointer is held then —
    // after the cudaMalloc below, that is the device allocation.
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted1\n"; };
    unique_p<decltype(deleter)> d_in(new float[length],deleter);  // host array — leaked
    cudaMalloc((void **) &d_in, length * sizeof(float));  // overwrites internal pointer
    std::unique_ptr<float[]> h_out(new float[length]);
    for(int i = 0; i < length; i++){ h_out[i] = i; }
    cudaMemcpy(d_in.get(), h_out.get(),length*sizeof(float),cudaMemcpyHostToDevice);
    printArray<<<1,1>>>(d_in.get(),length);
}
// v2 — same in-place cudaMalloc trick as v1, but the unique_ptr starts out
// holding nullptr, so overwriting the stored pointer leaks nothing.
// NOTE(review): `(void **) &d_in` still writes through the address of the
// unique_ptr object itself, relying on its layout (pointer as sole member);
// works in practice but is not guaranteed by the standard.
void version2(){
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted2\n"; };
    unique_p<decltype(deleter)> d_in(nullptr,deleter);  // owns nothing yet
    cudaMalloc((void **) &d_in, length * sizeof(float));  // overwrites internal pointer
    std::unique_ptr<float[]> h_out(new float[length]);
    for(int i = 0; i < length; i++){ h_out[i] = i; }
    cudaMemcpy(d_in.get(), h_out.get(),length*sizeof(float),cudaMemcpyHostToDevice);
    printArray<<<1,1>>>(d_in.get(),length);
}
// v3 (preferred) — the unique_ptr "adopts" a pointer that cudaMalloc has
// already initialised, mirroring the usual `unique_ptr<T>(new T)` idiom.
// No aliasing of the unique_ptr's internals, no host allocation to leak.
void version3(){
    // Allocating helper: returns a fresh device allocation of mySize bytes.
    // NOTE(review): the cudaMalloc return code is ignored; on failure ptr is
    // indeterminate.
    auto  myCudaMalloc = [](size_t mySize) { void* ptr; cudaMalloc((void**)&ptr, mySize); return ptr; };
    auto deleter = [](float* ptr) { cudaFree(ptr); std::cout<<"\nDeleted3\n"; };
    unique_p<decltype(deleter)> d_in((float*)myCudaMalloc(length*sizeof(float)),deleter);
    //unique_p<myDeleter> d_in((float*)myCudaMalloc(20*sizeof(float)));
    std::unique_ptr<float[]> h_out(new float[length]);
    for(int i = 0; i < length; i++){ h_out[i] = i; }
    cudaMemcpy(d_in.get(), h_out.get(),length*sizeof(float),cudaMemcpyHostToDevice);
    printArray<<<1,1>>>(d_in.get(),length);
}
// Drives all three versions, then tears the device down.
// cudaDeviceSynchronize() ensures the asynchronous <<<1,1>>> kernel launches
// have actually completed (and their device-side printf output is flushed,
// and any in-kernel errors have surfaced) before cudaDeviceReset() destroys
// the context.
int main(){
    version1();
    version2();
    version3();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}