You can write your own solver based on scipy.optimize; here is a small example of how to code your own custom Python quadprog():
# python3
import numpy as np
from scipy import optimize
class quadprog(object):
    """Minimal MATLAB-style quadprog built on scipy.optimize.minimize.

    Solves    min_x  0.5 * x' H x + f' x
              s.t.   A x <= b   and   lb <= x_i <= ub  for every component.

    The scipy OptimizeResult is stored in ``self.result``.
    """

    def __init__(self, H, f, A, b, x0, lb, ub):
        """Store the problem data and immediately run the solver.

        H  : (n, n) quadratic cost matrix.
        f  : (n,) linear cost vector.
        A  : (m, n) inequality-constraint matrix (A x <= b).
        b  : (m,) inequality right-hand side.
        x0 : (n,) initial guess.
        lb, ub : scalar bounds applied identically to every component.
        """
        self.H    = H
        self.f    = f
        self.A    = A
        self.b    = b
        self.x0   = x0
        # One identical (lb, ub) pair per decision variable; a generator
        # avoids building a throwaway intermediate list.
        self.bnds = tuple((lb, ub) for _ in x0)
        # Solve right away so callers can read self.result directly.
        self.result = self.solver()

    def objective_function(self, x):
        """Quadratic cost 0.5 * x' H x + f' x evaluated at x."""
        return 0.5 * np.dot(np.dot(x.T, self.H), x) + np.dot(self.f.T, x)

    def solver(self):
        """Run scipy's minimizer and return the OptimizeResult.

        scipy's 'ineq' constraints require fun(x) >= 0, so the inequality
        A x <= b is expressed as b - A x >= 0.
        """
        cons = ({'type': 'ineq', 'fun': lambda x: self.b - np.dot(self.A, x)})
        optimum = optimize.minimize(self.objective_function,
                                    x0          = self.x0.T,
                                    bounds      = self.bnds,
                                    constraints = cons,
                                    tol         = 1e-3)
        return optimum
Here is how to use this, using the same variables from the first example provided in matlab-quadprog:
# init vars
H  = np.array([[ 1, -1],
               [-1,  2]])
f  = np.array([-2, -6]).T
A  = np.array([[ 1, 1],
               [-1, 2],
               [ 2, 1]])
b  = np.array([2, 2, 3]).T
x0 = np.array([1, 2])
lb = 0
ub = 2
# call custom quadprog — bind the instance to a distinct name:
# `quadprog = quadprog(...)` would shadow the class and make it
# impossible to construct a second solver afterwards
qp = quadprog(H, f, A, b, x0, lb, ub)
print(qp.result)
The output of this short snippet is: 
     fun: -8.222222222222083
     jac: array([-2.66666675, -4.        ])
 message: 'Optimization terminated successfully.'
    nfev: 8
     nit: 2
    njev: 2
  status: 0
 success: True
       x: array([0.66666667, 1.33333333])
For more information on how to use scipy.optimize.minimize please refer to the docs.