Here's the code:
using System;

class Program
{
    public static void Main()
    {
        var test = new Test();

        // Integer literals with no suffix
        test.Go(1);
        test.Go(100);
        test.Go(10000);

        // Floating-point literals with no suffix
        test.Go(1.0);
        test.Go(100.0);
        test.Go(10000.0);
        test.Go(65535.0);

        test.Go(1000000000);
        test.Go(1000000000.0);
    }

    class Test
    {
        public void Go(int id)
        { Console.WriteLine(id + "int"); }
        public void Go(String id)
        { Console.WriteLine(id + "string"); }
        public void Go(short id)
        { Console.WriteLine(id + "short"); }
        public void Go(long id)
        { Console.WriteLine(id + "long"); }
        public void Go(double id)
        { Console.WriteLine(id + "double"); }
        public void Go(float id)
        { Console.WriteLine(id + "float"); }
        public void Go(decimal id)
        { Console.WriteLine(id + "decimal"); }
    }
}
The output is:
1int
100int
10000int
1double
100double
10000double
65535double
1000000000int
1000000000double
It seems the compiler always picks the int overload for integer literals and the double overload for floating-point literals.
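For reference, a small illustrative sketch (reusing the same Test instance; the values are arbitrary) suggests the literal's type is what drives the choice, since adding an explicit suffix or cast selects the other overloads:

// Illustrative only: suffixes/casts change the literal's compile-time type,
// so overload resolution picks the matching overload instead.
test.Go((short)1);   // prints "1short"   - cast to short
test.Go(1L);         // prints "1long"    - long literal suffix
test.Go(1F);         // prints "1float"   - float literal suffix
test.Go(1M);         // prints "1decimal" - decimal literal suffix
test.Go("1");        // prints "1string"  - string argument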
Thanks.