Here are a few options that can solve this:
df = spark.createDataFrame([('Drama',2015,705),('Romance',2015,203),('Comedy',2015,586),('Drama',2014,605),('Romance',2014,293),('Comedy',2014,786)],['Genres','Year','Number_Movies'])
First Option: Define a rank using a window function (partition by Year, order by Number_Movies descending). The row with the highest Number_Movies in each year gets rank 1.
from pyspark.sql.window import Window
from pyspark.sql.functions import row_number, desc

# Rank rows within each Year, highest Number_Movies first
w = Window.partitionBy("Year").orderBy(desc("Number_Movies"))
rank = row_number().over(w).alias('rank')

df.withColumn("rank", rank)\
    .where("rank = 1")\
    .drop("rank")\
    .show()
#+-------+----+-------------+
#| Genres|Year|Number_Movies|
#+-------+----+-------------+
#| Comedy|2014|          786|
#|  Drama|2015|          705|
#+-------+----+-------------+
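Note that row_number assigns exactly one row per year, so ties are broken arbitrarily. If you want every genre that ties for the top count, a minimal variation is to swap row_number for rank, which gives tied rows the same rank over the same window w:

from pyspark.sql.functions import rank as rank_fn

# rank() assigns 1 to every row tied for the highest Number_Movies in its year
df.withColumn("rank", rank_fn().over(w))\
    .where("rank = 1")\
    .drop("rank")\
    .show()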
Second Option: Get the maximum of Number_Movies for each year and self-join with the DataFrame to recover the Genres. Unlike row_number, the join keeps all genres tied at the maximum.
from pyspark.sql.functions import max, col

# Match each year's maximum back to the original rows to recover the Genres
joining_condition = [col('a.Year') == col('b.Year'), col('a.max_Number_Movies') == col('b.Number_Movies')]

df.groupBy("Year")\
    .agg(max("Number_Movies").alias("max_Number_Movies")).alias("a")\
    .join(df.alias("b"), joining_condition)\
    .selectExpr("b.Genres", "b.Year", "b.Number_Movies")\
    .show()
#+-------+----+-------------+
#| Genres|Year|Number_Movies|
#+-------+----+-------------+
#| Comedy|2014|          786|
#|  Drama|2015|          705|
#+-------+----+-------------+
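A third, join-free variant (a sketch using the struct-max trick, not part of the two options above, with "top" as an arbitrary alias): Spark orders structs field by field, so taking the max of a (Number_Movies, Genres) struct per year pulls back the whole top row in a single aggregation:

from pyspark.sql.functions import struct, max, col

# Structs compare on Number_Movies first, so max() keeps the top row per year
df.groupBy("Year")\
    .agg(max(struct("Number_Movies", "Genres")).alias("top"))\
    .select(col("top.Genres"), "Year", col("top.Number_Movies"))\
    .show()

Like the row_number approach, this returns a single row per year; on a tie it keeps the genre that sorts last alphabetically.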