Whenever I try to print the number of rows of a PySpark DataFrame, I get the following error.
import findspark

# findspark.init() must run before any pyspark import so that SPARK_HOME's
# python libs are placed on sys.path.
findspark.init()

from pyspark.sql import SparkSession  # explicit import instead of `import *`

# NOTE(review): the traceback's "Unsupported class file major version 56"
# indicates the JVM in use is Java 12, while Spark 2.4.x supports only
# Java 8 — point JAVA_HOME at a JDK 8 install (and prefer the jre8 build
# of the mssql-jdbc jar) to fix the actual error. TODO confirm environment.

# Path to the SQL Server JDBC driver jar shipped alongside Spark.
JDBC_DRIVER_JAR = (
    "C:\\Spark\\Spark\\spark-2.4.3-bin-hadoop2.7\\jars\\mssql-jdbc-7.4.1.jre12"
    ".jar"
)

spark = (
    SparkSession.builder
    .appName("Python Spark SQL basic example")
    .config("spark.driver.extraClassPath", JDBC_DRIVER_JAR)
    .getOrCreate()
)

# Read the table over JDBC using Windows integrated security.
mssql_df = (
    spark.read.format("jdbc")
    .option("url", "jdbc:sqlserver://MCNAVSQLCLU:1433;databaseName=NADIA;integratedSecurity=true")
    .option("dbtable", "[SBM$Vendor Inspection Plan]")
    .option("driver", 'com.microsoft.sqlserver.jdbc.SQLServerDriver')
    .load()
)

# count() returns an int; print it, otherwise the row count is silently
# discarded when this runs as a script.
print(mssql_df.count())
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "C:/Users/renos.bardis/PycharmProjects/erp-mdm/SparkConn.py", line 19, in <module>
    mssql_df.count()
  File "C:\Spark\Spark\spark-2.4.3-bin-hadoop2.7\python\pyspark\sql\dataframe.py", line 522, in count
    return int(self._jdf.count())
  File "C:\Spark\Spark\spark-2.4.3-bin-hadoop2.7\python\lib\py4j-0.10.7-src.zip\py4j\java_gateway.py", line 1257, in __call__
  File "C:\Spark\Spark\spark-2.4.3-bin-hadoop2.7\python\pyspark\sql\utils.py", line 79, in deco
    raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
pyspark.sql.utils.IllegalArgumentException: 'Unsupported class file major version 56'
 
    