I have written this snippet of code and I cannot understand why this float division returns inf when args[2] = 200 and 5.0 when args[2] = 2000. Is this caused because I am exceeding some decimal position boundary?
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
int main(int nargs, char **args) {
    float t = atoi(args[1]);   
    float dt = atoi(args[2])/1000;
    float nsamples = (t/dt);
    printf("%f\n", nsamples);
    return(0);
}
 
    