Pyspark error " fs = FileSystem.get(Configuration()) TypeError: 'JavaPackage' object is not callable"

pyspark
apache-spark

#1

import sys, os
import configparser as cp

try:
    from pyspark import SparkConf, SparkContext

    props = cp.RawConfigParser()
    props.read("src//main//resources//application.properties")

    env = sys.argv[1]

    # executionMode is read from the properties section named by argv[5]
    conf = SparkConf(). \
        setMaster(props.get(sys.argv[5], 'executionMode')). \
        setAppName("DailyRevenue")
    print(props.get(sys.argv[5], 'executionMode'))
    sc = SparkContext(conf=conf)

    Inputpath = sys.argv[1]
    Outputpath = sys.argv[2]
    month = sys.argv[3]

    # Hadoop filesystem handles, reached through the py4j gateway
    Path = sc._gateway.jvm.apache.hadoop.fs.Path
    FileSystem = sc._gateway.jvm.apache.hadoop.fs.FileSystem
    Configuration = sc._gateway.jvm.apache.hadoop.conf.Configuration
    fs = FileSystem.get(Configuration())
    if not fs.exists(Path(Inputpath)):
        print("path does not exist")
    else:
        if fs.exists(Path(Outputpath))::
            fs.delete(Path(Outputpath), True)

        # orders filtered to the requested month: (order_id, 1)
        ord = Inputpath + "orders"
        ordflt = sc.textFile(ord).filter(lambda o: month in o.split(",")[1]). \
            map(lambda o: (int(o.split(",")[0]), 1))

        # order items keyed by order_id: (order_id, (product_id, subtotal)),
        # joined with the filtered orders, then revenue summed per product_id
        orditm = Inputpath + "orderitems"
        revnbyprdctid = sc.textFile(orditm). \
            map(lambda oi: (int(oi.split(",")[1]),
                            (int(oi.split(",")[2]), float(oi.split(",")[4])))). \
            join(ordflt). \
            map(lambda rec: rec[1][0]). \
            reduceByKey(lambda total, ele: total + ele)

        # product names come from a local file: (product_id, product_name)
        localdir = sys.argv[4]
        prd = open(localdir + "products")
        prod = prd.read().splitlines()

        sc.parallelize(prod). \
            map(lambda p: (int(p.split(",")[0]), p.split(",")[2])). \
            join(revnbyprdctid). \
            map(lambda p: p[1][0] + "\t" + str(p[1][1])). \
            saveAsTextFile(Outputpath)

except ImportError as e:
    print("error in importing module", e)
    sys.exit(1)

This is my program, and I am getting the error below. Can you please help me resolve it?

C:\Users\admin\PycharmProjects\Spark_Python\venv\Scripts\python.exe C:/Users/admin/PycharmProjects/Spark_Python/src/main/python/retail_db/RevenuePerMonth.py E:\ E:\ 2013-08 E:\ dev
local
2018-12-25 00:37:17 WARN NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Traceback (most recent call last):
File "C:/Users/admin/PycharmProjects/Spark_Python/src/main/python/retail_db/RevenuePerMonth.py", line 20, in <module>
fs = FileSystem.get(Configuration())
TypeError: 'JavaPackage' object is not callable

Process finished with exit code 1
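
For reference, py4j exposes JVM classes through their fully qualified names, and any dotted path that does not resolve to a real class comes back as a JavaPackage object; calling such an object raises exactly this TypeError. The Hadoop classes live under org.apache.hadoop, not apache.hadoop. Below is a minimal, self-contained sketch of the usual access pattern; the "FsCheck" app name and the E:\ path are placeholders for illustration only:

from pyspark import SparkConf, SparkContext

sc = SparkContext(conf=SparkConf().setMaster("local").setAppName("FsCheck"))

# Fully qualified package path: note the leading "org."
Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
FileSystem = sc._gateway.jvm.org.apache.hadoop.fs.FileSystem

# Reuse the Hadoop configuration Spark already built for this context
fs = FileSystem.get(sc._jsc.hadoopConfiguration())

print(fs.exists(Path("E:\\")))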