I'm trying to do what is described here: Finding a subimage inside a Numpy image, so that I can search for an image inside a screenshot.
The code looks like this:
import cv2
import numpy as np
import gtk.gdk
from PIL import Image
def make_screenshot():
    # Grab the root window and read its size
    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    # Copy the screen contents into a pixbuf
    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
    pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])
    width, height = pb.get_width(), pb.get_height()
    # Convert the raw pixel data to a PIL image
    return Image.fromstring("RGB", (width, height), pb.get_pixels())
if __name__ == "__main__":
    img = make_screenshot()
    # Convert the PIL RGB image to an OpenCV BGR array
    cv_im = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    # Cut a 10x10 template out of the screenshot at (30, 30)
    template = cv_im[30:40, 30:40, :]
    result = cv2.matchTemplate(cv_im, template, cv2.TM_CCORR_NORMED)
    # Print the (row, col) position of the best match
    print np.unravel_index(result.argmax(), result.shape)
Depending on the method selected (instead of cv2.TM_CCORR_NORMED) I get completely different coordinates, but none of them is (30, 30) as in the example above.
Please teach me: what is wrong with this approach?
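For reference, this is the screenshot-free sanity check I have been comparing against. It is only a minimal sketch: the synthetic image and the names big/template are placeholders of mine, not part of the real code, and I am assuming cv2.minMaxLoc is the intended way to read out the match location:

import cv2
import numpy as np

# Synthetic "screenshot" with a template cut from a known offset
big = np.random.randint(0, 255, (200, 300, 3)).astype(np.uint8)
template = big[30:40, 30:40, :]

result = cv2.matchTemplate(big, template, cv2.TM_CCORR_NORMED)

# np.unravel_index on argmax gives (row, col), i.e. (y, x)
print(np.unravel_index(result.argmax(), result.shape))

# cv2.minMaxLoc reports the best-match location as (x, y)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print(max_loc)

On this synthetic input both printouts point at the same pixel, just with the axes in opposite order, which is why I am confused that the screenshot version does not report (30, 30) at all.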