data-writer.py

import cv2
import pytesseract
import time
from datetime import datetime
from pytesseract import Output
import numpy as np
import re
import os
from influxdb import InfluxDBClient

# InfluxDB 1.x client; points are written by SolarMonitor.writeData() below
client = InfluxDBClient(host="localhost", port=8086, username="influxdb", password="influxdbTSGAMES", timeout=120_000)
client.create_database("influxdb")
client.switch_database('influxdb')
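# Note (not from the original): InfluxDB 1.x's CREATE DATABASE is a no-op when
# the database already exists, so the create_database() call above should be
# safe to repeat on every start of this script.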
class SolarMonitor:
    BRIGHTNESS_THRESHOLD = 50  # 0-255
    camera = None

    def __init__(self, test=False):
        self.test = test
        if not test:
            self.initCamera()

    def parse(self, img):
        # --oem 3: default engine selection, --psm 7: treat the image as a single text line
        custom_oem = r'--oem 3 --psm 7'
        # Seven-segment traineddata: https://github.com/adrianlazaro8/Tesseract_sevenSegmentsLetsGoDigital
        data = pytesseract.image_to_data(img, lang='lets', config=custom_oem, output_type=Output.DICT)
        #data = pytesseract.image_to_data(img, lang='letsgodigital', config=custom_oem, output_type=Output.DICT)
        #data = pytesseract.image_to_data(img, config=custom_oem, output_type=Output.DICT)
        print(data)
        results = []
        for i in range(len(data['text'])):
            text = data['text'][i].strip('.,-_')
            text = re.sub('[^0-9]', '', text)
            if text:
                results.append(text)
        if len(results) == 2:
            # Scale the raw digits by 10 (the display apparently drops the decimal point)
            results = list(map(lambda x: int(x) / 10., results))
            # Plausibility check: reject obviously misread values
            if results[0] < 80 and results[1] < 900:
                return results
        return None
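    # Illustrative sketch (values invented, not from the original): if the meter
    # shows "245" and "1234", image_to_data() returns something like
    #   {'text': ['', '245', '1234'], 'conf': [-1, 87, 91], ...}
    # and parse() yields [24.5, 123.4], which writeData() later stores as the
    # "U" and "W" series respectively.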
    def getMasked(self, image):
        pixel_values = image.reshape((-1, 1))
        pixel_values = np.float32(pixel_values)
        # Clamp bright pixels at BRIGHTNESS_THRESHOLD and return immediately
        _, result = cv2.threshold(image, self.BRIGHTNESS_THRESHOLD, 255, cv2.THRESH_TRUNC)
        return result
        # NOTE: the k-means masking below is unreachable because of the early return above.
        # define stopping criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
        # number of clusters (K)
        k = 2
        _, labels, centers = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        # convert back to 8 bit values
        centers = np.uint8(centers)
        # flatten the labels array
        labels = labels.flatten()
        # convert all pixels to the color of the centroids
        segmented_image = centers[labels.flatten()]
        segmented_image = segmented_image.reshape(image.shape)
        # disable only the cluster number 2 (turn the pixel into black)
        masked_image = np.copy(image)
        # convert to the shape of a vector of pixel values
        masked_image = masked_image.reshape((-1, 1))
        # color (i.e. cluster) to disable
        cluster = 2
        masked_image[labels != cluster] = [0]
        # convert back to original shape
        masked_image = masked_image.reshape(image.shape)
        return masked_image
    def thresholding2(self, image):
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # 2x2 kernel for the (currently disabled) erode/dilate experiments below;
        # the iterations argument controls how strongly the image is eroded/dilated.
        kernel = np.ones((2, 2), np.uint8)
        #image = cv2.convertScaleAbs(image, alpha=10.0, beta=-700)
        #image = cv2.erode(image, kernel, iterations=1)
        #image = cv2.convertScaleAbs(image, alpha=2.0, beta=-50)
        #image = (255 - image)
        # image = self.getMasked(image)
        # image = cv2.dilate(image, kernel, iterations=1)
        # Median blur is the only preprocessing step that is actually applied
        image = cv2.medianBlur(image, 3)
        #thresh = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 1)
        # detect the contours on the binary image using cv2.CHAIN_APPROX_NONE
        #contours, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
        #cv2.drawContours(image=image, contours=contours, contourIdx=-1, color=(255, 255, 255), thickness=1, lineType=cv2.LINE_AA)
        #im2, contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # cv2.drawContours(image, contours, -1, (0,255,0), 3)
        #image = np.invert(image)
        return image
    def crop(self, image):
        #top=384
        #left=210
        #height= 60
        #width=230
        #return image[top : (top + height), left: (left + width)]
        # Corners of the display region in the full frame:
        # top-left, bottom-left, bottom-right, top-right
        pt_TL = [260, 190]
        pt_BL = [260, 242]
        pt_BR = [500, 240]
        pt_TR = [500, 190]
        width_AD = np.sqrt(((pt_TL[0] - pt_TR[0]) ** 2) + ((pt_TL[1] - pt_TR[1]) ** 2))
        width_BC = np.sqrt(((pt_BL[0] - pt_BR[0]) ** 2) + ((pt_BL[1] - pt_BR[1]) ** 2))
        maxWidth = max(int(width_AD), int(width_BC))
        height_AB = np.sqrt(((pt_TL[0] - pt_BL[0]) ** 2) + ((pt_TL[1] - pt_BL[1]) ** 2))
        height_CD = np.sqrt(((pt_BR[0] - pt_TR[0]) ** 2) + ((pt_BR[1] - pt_TR[1]) ** 2))
        maxHeight = max(int(height_AB), int(height_CD))
        input_pts = np.float32([pt_TL, pt_BL, pt_BR, pt_TR])
        output_pts = np.float32([[0, 0],
                                 [0, maxHeight - 1],
                                 [maxWidth - 1, maxHeight - 1],
                                 [maxWidth - 1, 0]])
        M = cv2.getPerspectiveTransform(input_pts, output_pts)
        image = cv2.warpPerspective(image, M, (maxWidth, maxHeight), flags=cv2.INTER_LINEAR)
        return image
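    # Hedged usage sketch (not in the original): to re-tune the corner points,
    # one could warp a single saved frame and inspect the result, e.g.
    #   monitor = SolarMonitor(test=True)
    #   cv2.imwrite('crop_check.jpg', monitor.crop(cv2.imread('tests/sample.jpg')))
    # where 'tests/sample.jpg' and 'crop_check.jpg' are placeholder filenames.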
    def initCamera(self):
        from picamera import PiCamera
        self.camera = PiCamera(
            resolution=(800, 608)
        )
        self.camera.awb_mode = 'off'
        self.camera.awb_gains = (1.5, 2.0)
        self.camera.shutter_speed = 10000
        self.camera.iso = 800
        self.camera.rotation = 0
    def capture(self):
        from picamera.array import PiRGBArray
        rawCapture = PiRGBArray(self.camera)
        file = "images/" + str(datetime.now()) + ".jpg"
        self.camera.capture(rawCapture, format="bgr")
        img = rawCapture.array
        img = self.crop(img)
        print("Captured.")
        return [file, img]
    def writeData(self, results):
        # Write the two readings as separate points tagged "U" and "W"
        client.write_points([
            {
                "measurement": "solar",
                "tags": {
                    "type": "U"
                },
                "fields": {
                    "value": results[0]
                }
            },
            {
                "measurement": "solar",
                "tags": {
                    "type": "W"
                },
                "fields": {
                    "value": results[1]
                }
            }
        ], time_precision='ms')
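    # Hedged usage sketch (not part of the original script): reading the latest
    # stored values back with the same influxdb 1.x client, grouped by the
    # "type" tag written above:
    #   rs = client.query("SELECT last(value) FROM solar GROUP BY type")
    #   for point in rs.get_points(tags={'type': 'U'}):
    #       print(point['last'])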
    def run(self):
        if self.test:
            # Test mode: run the OCR pipeline over still images in tests/
            for file in os.listdir('tests'):
                img = cv2.imread('tests/' + file)
                img = self.thresholding2(img)
                results = self.parse(img)
                print(file)
                print(results)
                if results:
                    self.writeData(results)
                cv2.imwrite(file + '_r.jpg', img)
        else:
            [file, img] = self.capture()
            img = self.thresholding2(img)
            results = self.parse(img)
            print(results)
            if results:
                self.writeData(results)
            # Annotate the processed frame with the parsed values and save a small JPEG
            img = cv2.putText(img, str(results), (75, 22), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 255, 255), 1, cv2.LINE_AA)
            cv2.imwrite(file + '_r.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 35])
            time.sleep(5)


solar = SolarMonitor(test=False)
while True:
    solar.run()
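# Usage note (based on the test branch of run() above): constructing the monitor
# with test=True skips the Pi camera, runs the OCR pipeline over every image in
# tests/, writes a processed copy as <filename>_r.jpg, and still pushes any
# parsed values to InfluxDB:
#
#   SolarMonitor(test=True).run()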