You don't need multiprocessing or threading to run subprocesses in parallel. For example:
#!/usr/bin/env python
from subprocess import Popen
# run commands in parallel
processes = [Popen("echo {i:d}; sleep 2; echo {i:d}".format(i=i), shell=True)
             for i in range(5)]
# collect statuses
exitcodes = [p.wait() for p in processes]
It runs 5 shell commands simultaneously. Note that neither threads nor the multiprocessing module are used here. There is no point in adding an ampersand & to the shell commands: Popen doesn't wait for the command to complete; you need to call .wait() explicitly.
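If blocking in .wait() is undesirable, the running processes can instead be checked without blocking via .poll(). A minimal sketch of that variation (the 0.5-second polling interval is an arbitrary choice, not from the code above):
#!/usr/bin/env python
import time
from subprocess import Popen

# start the commands in parallel, as above
processes = [Popen("echo {i:d}; sleep 2; echo {i:d}".format(i=i), shell=True)
             for i in range(5)]
# poll instead of blocking in .wait()
while processes:
    for p in processes[:]:
        if p.poll() is not None:  # .poll() returns None while the child is still running
            processes.remove(p)
    time.sleep(0.5)               # avoid busy-waiting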
It is convenient, though not necessary, to use threads to collect the output from the subprocesses:
#!/usr/bin/env python
from multiprocessing.dummy import Pool # thread pool
from subprocess import Popen, PIPE, STDOUT
# run commands in parallel
processes = [Popen("echo {i:d}; sleep 2; echo {i:d}".format(i=i), shell=True,
                   stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
             for i in range(5)]
# collect output in parallel
def get_lines(process):
    return process.communicate()[0].splitlines()
outputs = Pool(len(processes)).map(get_lines, processes)
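A thread pool from concurrent.futures works just as well as multiprocessing.dummy here. A minimal sketch, assuming you prefer ThreadPoolExecutor (get_lines is the same helper as above):
#!/usr/bin/env python
from concurrent.futures import ThreadPoolExecutor
from subprocess import Popen, PIPE, STDOUT

# run commands in parallel
processes = [Popen("echo {i:d}; sleep 2; echo {i:d}".format(i=i), shell=True,
                   stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
             for i in range(5)]

def get_lines(process):
    # .communicate() reads all output and waits for the child to exit
    return process.communicate()[0].splitlines()

# one worker thread per process, as with Pool above
with ThreadPoolExecutor(len(processes)) as executor:
    outputs = list(executor.map(get_lines, processes))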
Related: Python threading multiple bash subprocesses?
Here's a code example that gets the output from several subprocesses concurrently in the same thread (Python 3.8+):
#!/usr/bin/env python3
import asyncio
import sys
from subprocess import PIPE, STDOUT
async def get_lines(shell_command):
    p = await asyncio.create_subprocess_shell(
        shell_command, stdin=PIPE, stdout=PIPE, stderr=STDOUT
    )
    return (await p.communicate())[0].splitlines()
async def main():
    # get commands output in parallel
    coros = [
        get_lines(
            f'"{sys.executable}" -c "print({i:d}); import time; time.sleep({i:d})"'
        )
        for i in range(5)
    ]
    print(await asyncio.gather(*coros))
if __name__ == "__main__":
    asyncio.run(main())
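To cap how many subprocesses run at once, the same coroutine can be wrapped in an asyncio.Semaphore. A minimal sketch, where the limit of 2 is an arbitrary example value:
#!/usr/bin/env python3
import asyncio
import sys
from subprocess import PIPE, STDOUT

async def get_lines(shell_command, sem):
    async with sem:  # hold the semaphore while the subprocess runs
        p = await asyncio.create_subprocess_shell(
            shell_command, stdin=PIPE, stdout=PIPE, stderr=STDOUT
        )
        return (await p.communicate())[0].splitlines()

async def main():
    sem = asyncio.Semaphore(2)  # at most 2 subprocesses at a time
    coros = [
        get_lines(f'"{sys.executable}" -c "print({i:d})"', sem) for i in range(5)
    ]
    print(await asyncio.gather(*coros))

if __name__ == "__main__":
    asyncio.run(main())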
Old (2014) answer, for Python 3.4+:
#!/usr/bin/env python3
import asyncio
import sys
from asyncio.subprocess import PIPE, STDOUT
@asyncio.coroutine
def get_lines(shell_command):
    p = yield from asyncio.create_subprocess_shell(shell_command,
            stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    return (yield from p.communicate())[0].splitlines()
if sys.platform.startswith('win'):
    loop = asyncio.ProactorEventLoop() # for subprocess' pipes on Windows
    asyncio.set_event_loop(loop)
else:
    loop = asyncio.get_event_loop()
# get commands output in parallel
coros = [get_lines('"{e}" -c "print({i:d}); import time; time.sleep({i:d})"'
                    .format(i=i, e=sys.executable)) for i in range(5)]
print(loop.run_until_complete(asyncio.gather(*coros)))
loop.close()