I need to count the occurrences of the string "<page>" in a 104 GB file to get the number of articles in a given Wikipedia dump. First, I tried this:
grep -F '<page>' enwiki-20141208-pages-meta-current.xml | uniq -c
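(For reference, grep -c -F '<page>' would report the number of matching lines directly; that counts occurrences correctly only as long as there is at most one <page> per line.)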
However, grep crashes after a while. Therefore, I wrote the following program, but it only processes about 20 MB/s of the input file on my machine, which is roughly 5% of what my HDD can deliver. How can I speed up this code?
#include <iostream>
#include <fstream>
#include <string>
int main()
{
    // Open up file
    std::ifstream in("enwiki-20141208-pages-meta-current.xml");
    if (!in.is_open()) {
        std::cerr << "Could not open file." << std::endl;
        return 1;
    }
    // Statistics counters
    size_t chars = 0, pages = 0;
    // Token to look for
    const std::string token = "<page>";
    size_t token_length = token.length();
    // Current position of a partial token match
    size_t matching = 0;
    while (in.good()) {
        // Read one char at a time
        char current;
        in.read(¤t, 1);
        if (in.eof())
            break;
        chars++;
        // Continue matching the token
        if (current == token[matching]) {
            matching++;
            // Reached full token
            if (matching == token_length) {
                pages++;
                matching = 0;
                // Print progress
                if (pages % 1000 == 0) {
                    std::cout << pages << " pages, ";
                    std::cout << (chars / 1024 / 1024) << " MB" << std::endl;
                }
            }
        }
        // Start over; a mismatching '<' can still begin a new match
        else {
            matching = (current == token[0]) ? 1 : 0;
        }
    }
    // Print result
    std::cout << "Overall pages: " << pages << std::endl;
    // Cleanup
    in.close();
    return 0;
}
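
My first idea for speeding this up is to read the file in large chunks and run the same matching loop over each buffer, instead of issuing one read() call per character. Here is a minimal, untested sketch of what I mean (the 1 MiB buffer size is an arbitrary choice):

#include <fstream>
#include <iostream>
#include <string>
#include <vector>
int main()
{
    // Binary mode: we only scan raw bytes, no character translation needed
    std::ifstream in("enwiki-20141208-pages-meta-current.xml", std::ios::binary);
    if (!in.is_open()) {
        std::cerr << "Could not open file." << std::endl;
        return 1;
    }
    const std::string token = "<page>";
    std::vector<char> buffer(1 << 20); // 1 MiB per read
    size_t pages = 0, matching = 0;
    while (in) {
        // Read a chunk; gcount() tells us how many bytes we actually got
        in.read(buffer.data(), buffer.size());
        const std::streamsize got = in.gcount();
        // Same state machine as above, applied to the whole chunk;
        // 'matching' carries across reads, so tokens split between
        // two chunks are still counted
        for (std::streamsize i = 0; i < got; ++i) {
            if (buffer[i] == token[matching]) {
                if (++matching == token.length()) {
                    pages++;
                    matching = 0;
                }
            } else {
                // A mismatching '<' can still begin a new match
                matching = (buffer[i] == token[0]) ? 1 : 0;
            }
        }
    }
    std::cout << "Overall pages: " << pages << std::endl;
    return 0;
}

Is reading in chunks like this the right direction, or is there something even better (e.g. memory-mapping the file)?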