commit 6d8c61d5b9
Author: raiots
Date:   2024-08-02 10:24:38 +08:00

1256 changed files with 32537 additions and 0 deletions

1
.catkin_workspace Normal file

@ -0,0 +1 @@
# This file currently only serves to mark the location of a catkin workspace for tool integration

51
.gitignore vendored Normal file

@ -0,0 +1,51 @@
devel/
logs/
build/
bin/
lib/
msg_gen/
srv_gen/
msg/*Action.msg
msg/*ActionFeedback.msg
msg/*ActionGoal.msg
msg/*ActionResult.msg
msg/*Feedback.msg
msg/*Goal.msg
msg/*Result.msg
msg/_*.py
build_isolated/
devel_isolated/
# Generated by dynamic reconfigure
*.cfgc
/cfg/cpp/
/cfg/*.py
# Ignore generated docs
*.dox
*.wikidoc
# eclipse stuff
.project
.cproject
# qtcreator stuff
CMakeLists.txt.user
srv/_*.py
*.pcd
*.pyc
qtcreator-*
*.user
/planning/cfg
/planning/docs
/planning/src
*~
# Emacs
.#*
# Catkin custom files
CATKIN_IGNORE

BIN (new image files; previews omitted)
1722404103.1162927.jpg  298 KiB
1722421527.3884041.jpg  206 KiB
1722503579.3659685.jpg  190 KiB
1722503676.4216077.jpg   78 KiB
1722503845.860872.jpg    76 KiB
1722504321.978011.jpg    81 KiB
1722511537.5434725.jpg   92 KiB
1722511563.774326.jpg    81 KiB
1722519264.8660655.jpg   74 KiB
1722520961.9444659.jpg   82 KiB
1722520985.796584.jpg    85 KiB
1722521016.014052.jpg    80 KiB
Two additional binary files changed (contents not shown).

10
autostart/alphabetically.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun alphabetically alphabetically_main.py &
PID=$!
wait "$PID"


@ -0,0 +1,16 @@
[Unit]
Requires=roscore.service usb_cam.service jetmax_control.service
After=NetworkManager.service time-sync.target usb_cam.service jetmax_control.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
KillMode=mixed
ExecStart=/home/hiwonder/ros/autostart/alphabetically.bash
[Install]
WantedBy=multi-user.target

10
autostart/camera_cal.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
roslaunch camera_cal camera_cal.launch &
PID=$!
wait "$PID"


@ -0,0 +1,15 @@
[Unit]
Requires=roscore.service usb_cam.service jetmax_control.service
After=NetworkManager.service time-sync.target usb_cam.service jetmax_control.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
ExecStart=/home/hiwonder/ros/autostart/camera_cal.bash
[Install]
WantedBy=multi-user.target

10
autostart/color_sorting.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun color_sorting color_sorting_main.py &
PID=$!
wait "$PID"


@ -0,0 +1,14 @@
[Unit]
Requires=roscore.service usb_cam.service jetmax_control.service lab_config.service camera_cal.service
After=NetworkManager.service usb_cam.service jetmax_control.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
ExecStart=/home/hiwonder/ros/autostart/color_sorting.bash
[Install]
WantedBy=multi-user.target

11
autostart/jetmax_control.bash Executable file

@ -0,0 +1,11 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun remote_control remote_control_joystick.py &
rosrun jetmax_control jetmax_control_main.py &
PID=$!
wait "$PID"  # NOTE: $! holds only the PID of the last background job (jetmax_control_main.py)


@ -0,0 +1,15 @@
[Unit]
Requires=roscore.service rosbridge.service
After=NetworkManager.service time-sync.target roscore.service rosbridge.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
KillMode=mixed
ExecStart=/home/hiwonder/ros/autostart/jetmax_control.bash
[Install]
WantedBy=multi-user.target

10
autostart/lab_config.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
roslaunch lab_config lab_config_manager.launch &
PID=$!
wait "$PID"


@ -0,0 +1,15 @@
[Unit]
Requires=roscore.service usb_cam.service
After=NetworkManager.service usb_cam.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
ExecStart=/home/hiwonder/ros/autostart/lab_config.bash
[Install]
WantedBy=multi-user.target

10
autostart/object_tracking.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun object_tracking object_tracking_main.py &
PID=$!
wait "$PID"


@ -0,0 +1,16 @@
[Unit]
Requires=roscore.service usb_cam.service
After=NetworkManager.service usb_cam.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
KillMode=mixed
ExecStart=/home/hiwonder/ros/autostart/object_tracking.bash
[Install]
WantedBy=multi-user.target

10
autostart/palletizing.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source /home/hiwonder/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun palletizing palletizing_main.py &
PID=$!
wait "$PID"


@ -0,0 +1,15 @@
[Unit]
Requires=roscore.service usb_cam.service jetmax_control.service camera_cal.service
After=NetworkManager.service usb_cam.service jetmax_control.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
ExecStart=/home/hiwonder/ros/autostart/palletizing.bash
[Install]
WantedBy=multi-user.target

10
autostart/rosbridge.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
roslaunch /home/hiwonder/ros/autostart/rosbridge_websocket.launch &
PID=$!
wait "$PID"


@ -0,0 +1,15 @@
[Unit]
Requires=roscore.service
After=NetworkManager.service time-sync.target roscore.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
ExecStart=/home/hiwonder/ros/autostart/rosbridge.bash
[Install]
WantedBy=multi-user.target


@ -0,0 +1,87 @@
<launch>
<arg name="port" default="9090" />
<arg name="address" default="0.0.0.0" />
<arg name="ssl" default="false" />
<arg name="certfile" default=""/>
<arg name="keyfile" default="" />
<arg name="retry_startup_delay" default="5" />
<arg name="fragment_timeout" default="600" />
<arg name="delay_between_messages" default="0" />
<arg name="max_message_size" default="None" />
<arg name="unregister_timeout" default="1000000" />
<arg name="websocket_external_port" default="None" />
<arg name="use_compression" default="false" />
<arg name="authenticate" default="false" />
<arg name="websocket_ping_interval" default="0" />
<arg name="websocket_ping_timeout" default="30" />
<arg name="websocket_null_origin" default="true" />
<arg name="topics_glob" default="[*]" />
<arg name="services_glob" default="[*]" />
<arg name="params_glob" default="[*]" />
<arg name="bson_only_mode" default="false" />
<!-- Output: screen, log -->
<arg name="output" default="screen" />
<!-- Valid options for binary_encoder are "default", "b64" and "bson". -->
<arg unless="$(arg bson_only_mode)" name="binary_encoder" default="default"/>
<group if="$(arg ssl)">
<node name="rosbridge_websocket" pkg="rosbridge_server" type="rosbridge_websocket" output="$(arg output)">
<param name="certfile" value="$(arg certfile)" />
<param name="keyfile" value="$(arg keyfile)" />
<param name="authenticate" value="$(arg authenticate)" />
<param name="port" value="$(arg port)"/>
<param name="address" value="$(arg address)"/>
<param name="retry_startup_delay" value="$(arg retry_startup_delay)"/>
<param name="fragment_timeout" value="$(arg fragment_timeout)"/>
<param name="delay_between_messages" value="$(arg delay_between_messages)"/>
<param name="max_message_size" value="$(arg max_message_size)"/>
<param name="unregister_timeout" value="$(arg unregister_timeout)"/>
<param name="use_compression" value="$(arg use_compression)"/>
<param name="websocket_ping_interval" value="$(arg websocket_ping_interval)" />
<param name="websocket_ping_timeout" value="$(arg websocket_ping_timeout)" />
<param name="websocket_external_port" value="$(arg websocket_external_port)" />
<param name="websocket_null_origin" value="$(arg websocket_null_origin)" />
<param name="topics_glob" value="$(arg topics_glob)"/>
<param name="services_glob" value="$(arg services_glob)"/>
<param name="params_glob" value="$(arg params_glob)"/>
</node>
</group>
<group unless="$(arg ssl)">
<node name="rosbridge_websocket" pkg="rosbridge_server" type="rosbridge_websocket" output="$(arg output)">
<param name="authenticate" value="$(arg authenticate)" />
<param name="port" value="$(arg port)"/>
<param name="address" value="$(arg address)"/>
<param name="retry_startup_delay" value="$(arg retry_startup_delay)"/>
<param name="fragment_timeout" value="$(arg fragment_timeout)"/>
<param name="delay_between_messages" value="$(arg delay_between_messages)"/>
<param name="max_message_size" value="$(arg max_message_size)"/>
<param name="unregister_timeout" value="$(arg unregister_timeout)"/>
<param name="use_compression" value="$(arg use_compression)"/>
<param name="websocket_ping_interval" value="$(arg websocket_ping_interval)" />
<param name="websocket_ping_timeout" value="$(arg websocket_ping_timeout)" />
<param name="websocket_external_port" value="$(arg websocket_external_port)" />
<param name="topics_glob" value="$(arg topics_glob)"/>
<param name="services_glob" value="$(arg services_glob)"/>
<param name="params_glob" value="$(arg params_glob)"/>
<param name="bson_only_mode" value="$(arg bson_only_mode)"/>
</node>
</group>
<node name="rosapi" pkg="rosapi" type="rosapi_node" output="$(arg output)">
<param name="topics_glob" value="$(arg topics_glob)"/>
<param name="services_glob" value="$(arg services_glob)"/>
<param name="params_glob" value="$(arg params_glob)"/>
</node>
</launch>
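The launch file above exposes rosbridge on ws://<host>:9090 and also starts a rosapi node. A minimal smoke test of the JSON protocol, as a sketch assuming the third-party websocket-client Python package is installed (the /rosapi/topics service comes from the rosapi node started here):

import json
from websocket import create_connection  # pip install websocket-client

# Connect to the rosbridge server started by this launch file (default port 9090)
ws = create_connection("ws://localhost:9090")
# rosbridge speaks JSON; ask the rosapi node for the list of active topics
ws.send(json.dumps({"op": "call_service", "service": "/rosapi/topics"}))
print(ws.recv())  # the service response is also JSON
ws.close()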

17
autostart/roscore.service Normal file

@ -0,0 +1,17 @@
[Unit]
Description=roscore starter
After=NetworkManager.service time-sync.target
[Service]
# Start roscore as a fork and then wait for the tcp port to be opened
# ------------------------------------------------------------------
# Source all the environment variables, start roscore in a fork
# Since the service type is forking, systemd doesn't mark it as
# started until the original process exits, so we have the
# non-forked shell wait until it can connect to the TCP port opened by
# roscore, and then exit, preventing conflicts with dependent services
Type=forking
ExecStart=/bin/bash -c "source /opt/ros/melodic/setup.bash;roscore & while ! echo exit |nc localhost 11311 > /dev/null;do sleep 1;done"
[Install]
WantedBy=multi-user.target
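The comment block above describes the readiness trick: fork roscore, then have the non-forked shell block until the master's TCP port accepts connections, so units that depend on roscore.service only start once the master is actually up. The same check as a Python sketch (11311 is roscore's default port):

import socket
import time

def wait_for_ros_master(host="localhost", port=11311, poll_s=1.0):
    # Equivalent to the `nc` loop in ExecStart above: return only once
    # the ROS master's TCP port accepts a connection.
    while True:
        try:
            with socket.create_connection((host, port), timeout=1.0):
                return
        except OSError:
            time.sleep(poll_s)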

10
autostart/usb_cam.bash Executable file

@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
roslaunch /home/hiwonder/ros/autostart/usb_cam.launch &
PID=$!
wait "$PID"

24
autostart/usb_cam.launch Normal file

@ -0,0 +1,24 @@
<launch>
<arg name="camera_info_topic_name" default="/usb_cam/camera_info"/>
<node name="usb_cam" pkg="usb_cam" type="usb_cam_node" output="screen" respawn="true" respawn_delay="2">
<param name="video_device" value="/dev/usb_cam0" />
<param name="image_width" value="640" />
<param name="image_height" value="480" />
<param name="pixel_format" value="yuyv" />
<param name="camera_frame_id" value="usb_cam" />
<param name="io_method" value="mmap"/>
</node>
<node name="image_proc" pkg="image_proc" type="image_proc" ns="usb_cam"/>
<node name="web_video_server" pkg="web_video_server" type="web_video_server" output="screen">
<param name="port" type="int" value="8080" />
<param name="address" type="string" value="0.0.0.0" />
<param name="server_threads" type="int" value="2" />
<param name="ros_threads" type="int" value="2" />
<param name="width" type="int" value="640" />
<param name="height" type="int" value="480" />
<param name="quality" type="int" value="90" />
<param name="type" type="string" value="h264" />
</node>
</launch>
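Besides the ROS image topics (usb_cam publishes /usb_cam/image_raw, which image_proc rectifies to /usb_cam/image_rect_color for the scripts below), web_video_server serves the camera over HTTP on port 8080. A quick-look sketch, assuming an OpenCV build with network-stream (FFmpeg) support and web_video_server's standard /stream?topic= endpoint:

import cv2

# Grab one frame from the HTTP stream served by web_video_server above
cap = cv2.VideoCapture("http://localhost:8080/stream?topic=/usb_cam/image_rect_color")
ok, frame = cap.read()
print(ok, None if frame is None else frame.shape)
cap.release()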

16
autostart/usb_cam.service Normal file

@ -0,0 +1,16 @@
[Unit]
Requires=roscore.service
After=NetworkManager.service time-sync.target roscore.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
KillMode=mixed
ExecStart=/home/hiwonder/ros/autostart/usb_cam.bash
[Install]
WantedBy=multi-user.target


@ -0,0 +1,10 @@
#!/bin/bash
source /opt/ros/melodic/setup.bash
source ~/ros/devel/setup.bash
export ROS_HOME=/home/hiwonder/.ros
rosrun waste_classification waste_classification_main.py &
PID=$!
wait "$PID"


@ -0,0 +1,16 @@
[Unit]
Requires=roscore.service usb_cam.service jetmax_control.service
After=NetworkManager.service usb_cam.service jetmax_control.service
[Service]
Type=simple
User=hiwonder
Restart=always
RestartSec=5
KillMode=mixed
ExecStart=/home/hiwonder/ros/autostart/waste_classification.bash
[Install]
WantedBy=multi-user.target

BIN
camera_params.npz Normal file (binary content not shown)


@ -0,0 +1,203 @@
cmake_minimum_required(VERSION 3.0.2)
project(Ai_JetMax)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs # Or other packages containing msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES Ai_JetMax
# CATKIN_DEPENDS python
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/Ai_JetMax.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/Ai_JetMax_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_Ai_JetMax.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

59
src/Ai_JetMax/package.xml Normal file

@ -0,0 +1,59 @@
<?xml version="1.0"?>
<package format="2">
<name>Ai_JetMax</name>
<version>0.0.0</version>
<description>The Ai_JetMax package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="hiwonder@todo.todo">hiwonder</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/Ai_JetMax</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>



@ -0,0 +1,123 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import time
import rospy
import numpy as np
import threading
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from std_srvs.srv import Empty
from jetmax_control.msg import SetServo
import hiwonder
import queue
import pupil_apriltags as apriltag
import yaml
"""
AprilTag: a simple AprilTag distance-detection demo.
Adjust the JetMax so that the camera faces forward,
and hold the tag squarely facing the camera.
"""
ROS_NODE_NAME = "apriltag_detector"
TAG_SIZE = 33.30
class AprilTagDetect:
def __init__(self):
self.camera_params = None
self.K = None
self.R = None
self.T = None
def load_camera_params(self):
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
def image_proc_a(img):
frame_gray = cv2.cvtColor(np.copy(img), cv2.COLOR_RGB2GRAY) # convert the frame to grayscale
params = [state.K[0][0], state.K[1][1], state.K[0][2], state.K[1][2]] # camera intrinsics (fx, fy, cx, cy)
tags = at_detector.detect(frame_gray, estimate_tag_pose=True, camera_params=params, tag_size=TAG_SIZE) # run AprilTag detection
if not tags:
hiwonder.buzzer.off()
for tag in tags:
corners = tag.corners.reshape(1, -1, 2).astype(int) # the four corner points of the detected tag
center = tag.center.astype(int) # the tag's center point
cv2.drawContours(img, corners, -1, (255, 0, 0), 3) # draw the tag outline
cv2.circle(img, tuple(center), 5, (255, 255, 0), 10) # draw the center point
point_3d = np.array([[16.65, -16.65, 0],
[-16.65,-16.65, 0],
[-16.65, 16.65, 0],
[16.65, 16.65, 0]], dtype=np.double)
point_2d = np.array([tag.corners[0].astype(int),
tag.corners[1].astype(int),
tag.corners[2].astype(int),
tag.corners[3].astype(int)],
dtype=np.double)
dist_coefs = np.array([0, 0, 0, 0], dtype=np.double)
found, rvec, tvec = cv2.solvePnP(point_3d, point_2d, state.K, dist_coefs)
rotM = cv2.Rodrigues(rvec)[0]
camera_position = -np.matrix(rotM).T * np.matrix(tvec)
distance = -camera_position.T.tolist()[0][2]
print("Distance: {:0.2f}mm".format(distance))
if distance < 150:
hiwonder.buzzer.on()
else:
hiwonder.buzzer.off()
img_h, img_w = img.shape[:2]
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = image.copy()
frame_result = image_proc_a(frame_result)
bgr_image = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow('result', bgr_image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
state = AprilTagDetect()
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
at_detector = apriltag.Detector()
image_queue = queue.Queue(maxsize=1)
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax.go_home()
state.load_camera_params()
if state.camera_params is None:
rospy.logerr('Can not load camera parameters')
sys.exit(-1)
rospy.ServiceProxy('/jetmax/go_home', Empty)()
rospy.Publisher('/jetmax/end_effector/sucker/command', Bool, queue_size=1).publish(data=False)
rospy.Publisher('/jetmax/end_effector/servo1/command', SetServo, queue_size=1).publish(data=90, duration=0.5)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
while not rospy.is_shutdown():
image_proc_b()
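A note on the distance computed above: cv2.solvePnP returns the pose that maps tag-frame points into the camera frame, x_cam = R x_tag + t, so inverting it gives the camera's position in the tag frame, which is what the script prints and thresholds:

$$ C = -R^{\top}\, t, \qquad d = -C_z $$

The sign flip on the z component makes d the positive camera-to-tag distance; since point_3d and TAG_SIZE are given in millimetres, d is in millimetres, and the buzzer trips below 150 mm.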


@ -0,0 +1,146 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import time
import rospy
import numpy as np
import threading
from sensor_msgs.msg import Image
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from std_srvs.srv import Empty
from jetmax_control.msg import SetServo
import hiwonder
import queue
import pupil_apriltags as apriltag
import yaml
"""
AprilTag recognition and positioning experiment.
Adjust the JetMax so that the camera faces downward.
Place the tag with id 1 and tags with other ids under the camera; the program detects them and computes each tag's position relative to the id-1 tag.
"""
ROS_NODE_NAME = "apriltag_detector"
TAG_SIZE = 33.30
class AprilTagDetect:
def __init__(self):
self.camera_params = None
self.K = None
self.R = None
self.T = None
def load_camera_params(self):
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
r_mat = np.zeros((3, 3), dtype=np.float64)
cv2.Rodrigues(self.R, r_mat)
self.r_mat = r_mat
def camera_to_world(cam_mtx, r_mat, t, img_points):
"""
Compute the real-world position from image coordinates using the camera intrinsic and extrinsic parameters.
"""
inv_k = np.asmatrix(cam_mtx).I
# invR * T
inv_r = np.asmatrix(r_mat).I # 3*3
transPlaneToCam = np.dot(inv_r, np.asmatrix(t)) # 3*3 dot 3*1 = 3*1
world_pt = []
coords = np.zeros((3, 1), dtype=np.float64)
for img_pt in img_points:
coords[0][0] = img_pt[0][0]
coords[1][0] = img_pt[0][1]
coords[2][0] = 1.0
worldPtCam = np.dot(inv_k, coords) # 3*3 dot 3*1 = 3*1
# [x,y,1] * invR
worldPtPlane = np.dot(inv_r, worldPtCam) # 3*3 dot 3*1 = 3*1
# zc
scale = transPlaneToCam[2][0] / worldPtPlane[2][0]
# zc * [x,y,1] * invR
scale_worldPtPlane = np.multiply(scale, worldPtPlane)
# [X,Y,Z]=zc*[x,y,1]*invR - invR*T
worldPtPlaneReproject = np.asmatrix(scale_worldPtPlane) - np.asmatrix(transPlaneToCam) # 3*1 dot 1*3 = 3*3
pt = np.zeros((3, 1), dtype=np.float64)
pt[0][0] = worldPtPlaneReproject[0][0]
pt[1][0] = worldPtPlaneReproject[1][0]
pt[2][0] = 0
world_pt.append(pt.T.tolist())
return world_pt
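# In matrix form, the loop above back-projects a pixel p = (u, v, 1) onto the
# Z = 0 working plane:
#     X_world = z_c * R^{-1} K^{-1} p - R^{-1} t
# where the scale z_c = (R^{-1} t)_z / (R^{-1} K^{-1} p)_z is chosen so that
# the recovered point's Z component is exactly zero.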
def image_proc_a(img):
frame_gray = cv2.cvtColor(np.copy(img), cv2.COLOR_RGB2GRAY) # convert the frame to grayscale
params = [state.K[0][0], state.K[1][1], state.K[0][2], state.K[1][2]] # camera intrinsics (fx, fy, cx, cy)
tags = at_detector.detect(frame_gray, estimate_tag_pose=True, camera_params=params, tag_size=TAG_SIZE) # run AprilTag detection
for tag in tags:
corners = tag.corners.reshape(1, -1, 2).astype(int) # the four corner points of the detected tag
center = tag.center.astype(int) # the tag's center point
cv2.drawContours(img, corners, -1, (255, 0, 0), 3) # draw the tag outline
cv2.circle(img, tuple(center), 5, (255, 255, 0), 10) # draw the center point
rotM = tag.pose_R
tvec = tag.pose_t
if tag.tag_id == 1: # if this is the id-1 tag, save its pose as the reference extrinsics
state.r_mat = rotM
state.T = tvec
else:
# otherwise, compute this tag's position relative to the id-1 tag
x, y, _ = camera_to_world(state.K, state.r_mat, state.T, tag.center.reshape((1,1,2)))[0][0]
# compute the Euler angles
theta_z = math.atan2(rotM[1, 0], rotM[0, 0])*180.0/math.pi
theta_y = math.atan2(-1.0*rotM[2, 0], math.sqrt(rotM[2, 1]**2 + rotM[2, 2]**2))*180.0/math.pi
theta_x = math.atan2(rotM[2, 1], rotM[2, 2])*180.0/math.pi
print("id:{}, x:{:0.2f}mm, y:{:0.2f}mm, angle:{:0.2f}deg".format(tag.tag_id, x, y, theta_z))
s1 = "id:{}".format(tag.tag_id)
s2 = "x:{:0.2f}mm, y:{:0.2f}mm".format(x, y)
s3 = "angle:{:0.2f}deg".format(theta_z)
cv2.putText(img, s1, (center[0] - 50, center[1]), 0, 0.7, (0, 255, 0), 2)
cv2.putText(img, s2, (center[0] - 50, center[1]+20), 0, 0.7, (0, 255, 0), 2)
cv2.putText(img, s3, (center[0] - 50, center[1] + 40), 0, 0.7, (0, 255, 0), 2)
img_h, img_w = img.shape[:2]
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = image.copy()
frame_result = image_proc_a(frame_result)
bgr_image = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow('result', bgr_image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
state = AprilTagDetect()
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
at_detector = apriltag.Detector()
image_queue = queue.Queue(maxsize=1)
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax.go_home()
state.load_camera_params()
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback, queue_size=1)
if state.camera_params is None:
rospy.logerr('Can not load camera parameters')
sys.exit(-1)
while not rospy.is_shutdown():
image_proc_b()
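The angles printed for each tag are the ZYX Euler decomposition of the tag's rotation matrix R (entries 1-indexed):

$$ \theta_z = \operatorname{atan2}(R_{21}, R_{11}), \quad \theta_y = \operatorname{atan2}\!\left(-R_{31}, \sqrt{R_{32}^2 + R_{33}^2}\right), \quad \theta_x = \operatorname{atan2}(R_{32}, R_{33}) $$

Only θz, the in-plane rotation, is displayed, since the tags lie flat under the downward-facing camera.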


@ -0,0 +1,197 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import time
import rospy
import numpy as np
import threading
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from std_srvs.srv import Empty
from jetmax_control.msg import SetServo
import hiwonder
import queue
import pupil_apriltags as apriltag
import yaml
"""
AprilTag tracking experiment.
Adjust the JetMax so that the camera faces forward.
The JetMax will track an AprilTag as it moves left/right, up/down, and forward/backward,
keeping the tag at the image center, about 15 cm in front of the camera.
Multiple tags are not handled; if several tags appear at once the behavior may be erratic.
If the tag is too far from the camera, the program may exit because the arm cannot reach the target position.
"""
ROS_NODE_NAME = "apriltag_detector"
TAG_SIZE = 33.30
class AprilTagDetect:
def __init__(self):
self.is_running = False
self.moving_block = None
self.image_sub = None
self.runner = None
self.count = 0
self.level = 0
self.lock = threading.RLock()
self.camera_params = None
self.K = None
self.R = None
self.T = None
self.pid_x = hiwonder.PID(0.07, 0, 0)
self.pid_y = hiwonder.PID(0.05, 0, 0)
self.pid_z = hiwonder.PID(0.05, 0, 0)
self.servo_x = 500
def reset(self):
self.is_running = False
self.moving_block = None
self.image_sub = None
self.runner = None
self.count = 0
self.level = 0
def load_camera_params(self):
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
def rotation_mtx_to_euler(R):
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
def RotateByZ(Cx, Cy, thetaZ):
rz = thetaZ*math.pi/180.0
outX = math.cos(rz)*Cx - math.sin(rz)*Cy
outY = math.sin(rz)*Cx + math.cos(rz)*Cy
return outX, outY
def RotateByY(Cx, Cz, thetaY):
ry = thetaY*math.pi/180.0
outZ = math.cos(ry)*Cz - math.sin(ry)*Cx
outX = math.sin(ry)*Cz + math.cos(ry)*Cx
return outX, outZ
def RotateByX(Cy, Cz, thetaX):
rx = thetaX*math.pi/180.0
outY = math.cos(rx)*Cy - math.sin(rx)*Cz
outZ = math.sin(rx)*Cy + math.cos(rx)*Cz
return outY, outZ
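# RotateByZ/Y/X each rotate a point about a single axis by theta degrees.
# Below they are applied to tvec with the negated Euler angles, un-rotating
# it so that (Cx, Cy, Cz) is the camera position expressed in the tag frame;
# numerically this matches the -R^T * t computed via cv2.Rodrigues.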
def image_proc_a(img):
frame_gray = cv2.cvtColor(np.copy(img), cv2.COLOR_RGB2GRAY) # convert the frame to grayscale
params = [state.K[0][0], state.K[1][1], state.K[0][2], state.K[1][2]] # camera intrinsics (fx, fy, cx, cy)
tags = at_detector.detect(frame_gray, estimate_tag_pose=True, camera_params=params, tag_size=TAG_SIZE) # run AprilTag detection
for tag in tags:
corners = tag.corners.reshape(1, -1, 2).astype(int) # the four corner points of the detected tag
center = tag.center.astype(int) # the tag's center point
cv2.drawContours(img, corners, -1, (255, 0, 0), 3) # draw the tag outline
cv2.circle(img, tuple(center), 5, (255, 255, 0), 10) # draw the center point
point_3d = np.array([[16.65, -16.65, 0],
[-16.65,-16.65, 0],
[-16.65, 16.65, 0],
[16.65, 16.65, 0]], dtype=np.double)
point_2d = np.array([tag.corners[0].astype(int),
tag.corners[1].astype(int),
tag.corners[2].astype(int),
tag.corners[3].astype(int)],
dtype=np.double)
dist_coefs = np.array([0, 0, 0, 0], dtype=np.double)
found, rvec, tvec = cv2.solvePnP(point_3d, point_2d, state.K, dist_coefs)
rotM = cv2.Rodrigues(rvec)[0]
camera_position = -np.matrix(rotM).T * np.matrix(tvec)
print(camera_position.T)
# compute the rotation angles about the three axes
thetaZ = math.atan2(rotM[1, 0], rotM[0, 0])*180.0/math.pi
thetaY = math.atan2(-1.0*rotM[2, 0], math.sqrt(rotM[2, 1]**2 + rotM[2, 2]**2))*180.0/math.pi
thetaX = math.atan2(rotM[2, 1], rotM[2, 2])*180.0/math.pi
# camera coordinates
x = tvec[0]
y = tvec[1]
z = tvec[2]
(x, y) = RotateByZ(x, y, -1.0*thetaZ)
(x, z) = RotateByY(x, z, -1.0*thetaY)
(y, z) = RotateByX(y, z, -1.0*thetaX)
Cx = x*-1
Cy = y*-1
Cz = z*-1
print("camera position:",Cx, Cy, Cz)
print("camera rotation:", thetaX, thetaY, thetaZ)
center_x, center_y = tag.center
e_x = 320 - center_x
e_y = 240 - center_y
e_z = -150 - Cz
state.pid_x.update(e_x)
state.servo_x -= state.pid_x.output
jetmax.set_servo(1, state.servo_x, 0.04)
cur = list(jetmax.position)
state.pid_y.update(e_y)
state.pid_z.update(e_z)
cur[2] -= state.pid_y.output
cur[1] += state.pid_z.output
jetmax.set_position(cur, 0.04)
img_h, img_w = img.shape[:2]
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = image.copy()
frame_result = image_proc_a(frame_result)
bgr_image = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow('result', bgr_image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
state = AprilTagDetect()
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
at_detector = apriltag.Detector()
image_queue = queue.Queue(maxsize=1)
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax.go_home()
state.load_camera_params()
if state.camera_params is None:
rospy.logerr('Can not load camera parameters')
sys.exit(-1)
rospy.ServiceProxy('/jetmax/go_home', Empty)()
rospy.Publisher('/jetmax/end_effector/sucker/command', Bool, queue_size=1).publish(data=False)
rospy.Publisher('/jetmax/end_effector/servo1/command', SetServo, queue_size=1).publish(data=90, duration=0.5)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
while not rospy.is_shutdown():
image_proc_b()
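The tracking loop above drives three PID controllers from the errors e_x, e_y (pixel offsets from the 640x480 image center) and e_z (depth offset from -150 mm): e_x pans servo 1, while e_y and e_z nudge the arm's height and reach. hiwonder.PID is used only through update(err) and the .output attribute; a minimal positional-form sketch with that interface, offered as an assumption rather than the vendor implementation:

class PID:
    # Sketch of the update()/.output interface used above; the real
    # hiwonder.PID may add dt handling, output limits, or anti-windup.
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self._integral = 0.0
        self._prev_err = None
        self.output = 0.0

    def update(self, err):
        self._integral += err
        derivative = 0.0 if self._prev_err is None else err - self._prev_err
        self._prev_err = err
        self.output = self.kp * err + self.ki * self._integral + self.kd * derivative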

259
src/Ai_JetMax/scripts/auto_reg.py Executable file

@ -0,0 +1,259 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import threading
import numpy as np
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from std_srvs.srv import Empty
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from color_sorting.srv import SetTarget, SetTargetResponse, SetTargetRequest
from std_msgs.msg import Bool
from jetmax_control.msg import SetServo
import hiwonder
ROS_NODE_NAME = "color_sorting"
IMAGE_PROC_SIZE = 640, 480
unit_block_corners = np.asarray([[0, 0, 0],
[20, -20, 0], # ±20 mm: corners of the 40 mm square block face
[-20, -20, 0],
[-20, 20, 0],
[20, 20, 0]],
dtype=np.float64)
unit_block_img_pts = None
def camera_to_world(cam_mtx, r, t, img_points):
inv_k = np.asmatrix(cam_mtx).I
r_mat = np.zeros((3, 3), dtype=np.float64)
cv2.Rodrigues(r, r_mat)
# invR * T
inv_r = np.asmatrix(r_mat).I # 3*3
transPlaneToCam = np.dot(inv_r, np.asmatrix(t)) # 3*3 dot 3*1 = 3*1
world_pt = []
coords = np.zeros((3, 1), dtype=np.float64)
for img_pt in img_points:
coords[0][0] = img_pt[0][0]
coords[1][0] = img_pt[0][1]
coords[2][0] = 1.0
worldPtCam = np.dot(inv_k, coords) # 3*3 dot 3*1 = 3*1
# [x,y,1] * invR
worldPtPlane = np.dot(inv_r, worldPtCam) # 3*3 dot 3*1 = 3*1
# zc
scale = transPlaneToCam[2][0] / worldPtPlane[2][0]
# zc * [x,y,1] * invR
scale_worldPtPlane = np.multiply(scale, worldPtPlane)
# [X,Y,Z]=zc*[x,y,1]*invR - invR*T
worldPtPlaneReproject = np.asmatrix(scale_worldPtPlane) - np.asmatrix(transPlaneToCam) # 3*1 dot 1*3 = 3*3
pt = np.zeros((3, 1), dtype=np.float64)
pt[0][0] = worldPtPlaneReproject[0][0]
pt[1][0] = worldPtPlaneReproject[1][0]
pt[2][0] = 0
world_pt.append(pt.T.tolist())
return world_pt
class ColorSortingState:
def __init__(self):
self.target_colors = {}
self.target_positions = {
'red': ((238, -15, 95), -5),
'green': ((238, 35, 95), 8),
'blue': ((238, 85, 95), 18)
}
self.heartbeat_timer = None
self.is_running = False
self.image_sub = None
self.lock = threading.RLock()
self.runner = None
self.count = 0
self.camera_params = None
self.K = None
self.R = None
self.T = None
self.WIDTH = None
def load_camera_params(self):
global unit_block_img_pts
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
img_pts, jac = cv2.projectPoints(unit_block_corners, self.R, self.T, self.K, None)
unit_block_img_pts = img_pts.reshape(5, 2)
l_p1 = unit_block_img_pts[-1]
l_p2 = unit_block_img_pts[-2]
self.WIDTH = math.sqrt((l_p1[0] - l_p2[0]) ** 2 + (l_p1[1] - l_p2[1]) ** 2)
print(unit_block_img_pts)
def moving(rect):
cur_x, cur_y, cur_z = jetmax.position
try:
x, y, _ = camera_to_world(state.K, state.R, state.T, np.array(rect[0]).reshape((1, 1, 2)))[0][0]
# Calculate the distance between the current position and the target position to control the movement speed
t = math.sqrt(x * x + y * y + 120 * 120) / 140
angle = rect[2]
new_x, new_y = cur_x + x, cur_y + y
# Pick up the block
hiwonder.pwm_servo1.set_position(90 + angle, 0.1) # 90 deg is the sucker's default angle; offset by the block angle first, then returning to 90 deg squares the block
jetmax.set_position((new_x, new_y, 120), t) # move above the block
rospy.sleep(t)
sucker.set_state(True) # turn on the sucker to grab the block
jetmax.set_position((new_x, new_y, 85), 1)
rospy.sleep(1.05)
cur_x, cur_y, cur_z = jetmax.position
jetmax.set_position((cur_x, cur_y, 120), 0.8) # lift the arm
rospy.sleep(1)
hiwonder.pwm_servo1.set_position(90, 0.1) # return the sucker angle to center
rospy.sleep(0.2)
# Go to the target position
jetmax.set_position((cur_x, cur_y, 85), 1) # lower the block again to place it
rospy.sleep(1.1)
sucker.release(3) # release the sucker
jetmax.set_position((cur_x, cur_y, 120), 0.8) # lift the arm
rospy.sleep(0.8)
except Exception as e:
rospy.logerr(e)
finally:
# return to the home position
sucker.release(3)
hiwonder.pwm_servo1.set_position(90, 0.5)
jetmax.go_home(2)
rospy.sleep(2.5)
state.runner = None
def point_xy(pt_a, pt_b, r):
x_a, y_a = pt_a
x_b, y_b = pt_b
if x_a == x_b:
return x_a, y_a + (r / abs((y_b - y_a))) * (y_b - y_a)
k = (y_a - y_b) / (x_a - x_b)
b = y_a - k * x_a
A = k ** 2 + 1
B = 2 * ((b - y_a) * k - x_a)
C = (b - y_a) ** 2 + x_a ** 2 - r ** 2
x1 = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
x2 = (-B - math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
y1 = k * x1 + b
y2 = k * x2 + b
dist_1 = math.sqrt((x1 - x_b) ** 2 + (y1 - y_b) ** 2)
dist_2 = math.sqrt((x2 - x_b) ** 2 + (y2 - y_b) ** 2)
if dist_1 <= dist_2:
return x1, y1
else:
return x2, y2
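# point_xy finds the point at distance r from pt_a along the line through
# pt_a and pt_b: it intersects the circle (x - x_a)^2 + (y - y_a)^2 = r^2
# with the line y = k*x + b (the quadratic A*x^2 + B*x + C = 0 above) and
# keeps the root nearer to pt_b. image_proc uses it to step one projected
# block-width from the corner farthest from the principal point toward each
# adjacent corner; the midpoint of the two results estimates the block's
# top-face center under perspective.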
def image_proc(img):
if state.runner is not None:
return img
img_h, img_w = img.shape[:2]
frame_gb = cv2.GaussianBlur(np.copy(img), (5, 5), 5)
frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_RGB2LAB) # Convert rgb to lab
blocks = []
for color_name, color in state.target_colors.items(): # Loop through all selected colors
frame_mask = cv2.inRange(frame_lab, tuple(color['min']), tuple(color['max']))
eroded = cv2.erode(frame_mask, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour_area = map(lambda c: (c, math.fabs(cv2.contourArea(c))), contours)
contour_area = list(filter(lambda c: c[1] > 1000, contour_area)) # Eliminate contours that are too small
if len(contour_area) > 0:
for contour, area in contour_area: # Loop through all the contours found
rect = cv2.minAreaRect(contour)
center_x, center_y = rect[0]
box = cv2.boxPoints(rect) # The four vertices of the minimum-area-rectangle
box_list = box.tolist()
box = np.int0(box)
ap = max(box_list, key=lambda p: math.sqrt((p[0] - state.K[0][2]) ** 2 + (p[1] - state.K[1][2]) ** 2))
index_ap = box_list.index(ap)
p1 = box_list[index_ap - 1 if index_ap - 1 >= 0 else 3]
p2 = box_list[index_ap + 1 if index_ap + 1 <= 3 else 0]
n_p1 = point_xy(ap, p1, state.WIDTH)
n_p2 = point_xy(ap, p2, state.WIDTH)
c_x, c_y = None, None
if n_p1 and n_p2:
x_1, y_1 = n_p1
x_2, y_2 = n_p2
c_x = (x_1 + x_2) / 2
c_y = (y_1 + y_2) / 2
cv2.circle(img, (int(n_p1[0]), int(n_p1[1])), 2, (255, 255, 0), 10)
cv2.circle(img, (int(n_p2[0]), int(n_p2[1])), 2, (255, 255, 0), 10)
cv2.circle(img, (int(c_x), int(c_y)), 2, (0, 0, 0), 10)
cv2.circle(img, (int(ap[0]), int(ap[1])), 2, (0, 255, 255), 10)
cv2.drawContours(img, [box], -1, hiwonder.COLORS[color_name.upper()], 2)
cv2.circle(img, (int(center_x), int(center_y)), 1, hiwonder.COLORS[color_name.upper()], 5)
rect = list(rect)
if c_x:
rect[0] = c_x, c_y
else:
rect[0] = (center_x, center_y)
x, y = rect[0]
cv2.circle(img, (int(x), int(y)), 2, (255, 255, 255), 5)
angle = rect[2]
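# fold the minAreaRect angle into (-45, 45]: the smallest rotation of the
# square block away from axis-aligned, later used to square the sucker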
rect[2] = angle - 90 if angle > 45 else angle
blocks.append((rect, color_name))
if len(blocks) > 0:
#print(blocks)
for block in blocks:
rect, color_name = block
(x, y), _, angle = rect
print(angle, end=', ')
if abs(angle) > 5:
print("start x:{:0.2f}, y:{:0.2f}, angle:{:0.2f}".format(x, y, angle))
state.runner = threading.Thread(target=moving, args=(rect, ), daemon=True)
state.runner.start()
print("\n")
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_callback(ros_image):
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = np.copy(image)
frame_result = image_proc(frame_result)
image_bgr = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow('result', image_bgr)
cv2.waitKey(1)
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
state = ColorSortingState()
state.load_camera_params()
if state.camera_params is None:
rospy.logerr("Can not load camera params")
sys.exit(-1)
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
jetmax.go_home()
rospy.sleep(1)
color_ranges = rospy.get_param('/lab_config_manager/color_range_list', {})
state.target_colors['red'] = color_ranges['red']
state.target_colors['green'] = color_ranges['green']
state.target_colors['blue'] = color_ranges['blue']
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
try:
rospy.spin()
except KeyboardInterrupt:
sys.exit(0)


@ -0,0 +1,164 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import threading
import numpy as np
import rospy
from sensor_msgs.msg import Image
import hiwonder
ROS_NODE_NAME = "color_angle"
unit_block_corners = np.asarray([[0, 0, 0],
[20, -20, 0], # ±20 mm: corners of the 40 mm square block face
[-20, -20, 0],
[-20, 20, 0],
[20, 20, 0]],
dtype=np.float64)
unit_block_img_pts = None
def camera_to_world(cam_mtx, r, t, img_points):
inv_k = np.asmatrix(cam_mtx).I
r_mat = np.zeros((3, 3), dtype=np.float64)
cv2.Rodrigues(r, r_mat)
# invR * T
inv_r = np.asmatrix(r_mat).I # 3*3
transPlaneToCam = np.dot(inv_r, np.asmatrix(t)) # 3*3 dot 3*1 = 3*1
world_pt = []
coords = np.zeros((3, 1), dtype=np.float64)
for img_pt in img_points:
coords[0][0] = img_pt[0][0]
coords[1][0] = img_pt[0][1]
coords[2][0] = 1.0
worldPtCam = np.dot(inv_k, coords) # 3*3 dot 3*1 = 3*1
# [x,y,1] * invR
worldPtPlane = np.dot(inv_r, worldPtCam) # 3*3 dot 3*1 = 3*1
# zc
scale = transPlaneToCam[2][0] / worldPtPlane[2][0]
# zc * [x,y,1] * invR
scale_worldPtPlane = np.multiply(scale, worldPtPlane)
# [X,Y,Z]=zc*[x,y,1]*invR - invR*T
worldPtPlaneReproject = np.asmatrix(scale_worldPtPlane) - np.asmatrix(transPlaneToCam) # 3*1 dot 1*3 = 3*3
pt = np.zeros((3, 1), dtype=np.float64)
pt[0][0] = worldPtPlaneReproject[0][0]
pt[1][0] = worldPtPlaneReproject[1][0]
pt[2][0] = 0
world_pt.append(pt.T.tolist())
return world_pt
class State:
def __init__(self):
self.target_colors = {}
self.runner = None
self.camera_params = None
self.K = None
self.R = None
self.T = None
self.WIDTH = None
def load_camera_params(self):
global unit_block_img_pts
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
img_pts, jac = cv2.projectPoints(unit_block_corners, self.R, self.T, self.K, None)
unit_block_img_pts = img_pts.reshape(5, 2)
l_p1 = unit_block_img_pts[-1]
l_p2 = unit_block_img_pts[-2]
self.WIDTH = math.sqrt((l_p1[0] - l_p2[0]) ** 2 + (l_p1[1] - l_p2[1]) ** 2)
print(unit_block_img_pts)
def point_xy(pt_a, pt_b, r):
x_a, y_a = pt_a
x_b, y_b = pt_b
if x_a == x_b:
return x_a, y_a + (r / abs((y_b - y_a))) * (y_b - y_a)
k = (y_a - y_b) / (x_a - x_b)
b = y_a - k * x_a
A = k ** 2 + 1
B = 2 * ((b - y_a) * k - x_a)
C = (b - y_a) ** 2 + x_a ** 2 - r ** 2
x1 = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
x2 = (-B - math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
y1 = k * x1 + b
y2 = k * x2 + b
dist_1 = math.sqrt((x1 - x_b) ** 2 + (y1 - y_b) ** 2)
dist_2 = math.sqrt((x2 - x_b) ** 2 + (y2 - y_b) ** 2)
if dist_1 <= dist_2:
return x1, y1
else:
return x2, y2
def image_proc(img):
img_h, img_w = img.shape[:2]
frame_gb = cv2.GaussianBlur(np.copy(img), (5, 5), 5)
frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_RGB2LAB) # Convert rgb to lab
blocks = []
for color_name, color in state.target_colors.items(): # Loop through all selected colors
frame_mask = cv2.inRange(frame_lab, tuple(color['min']), tuple(color['max']))
eroded = cv2.erode(frame_mask, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour_area = map(lambda c: (c, math.fabs(cv2.contourArea(c))), contours)
contour_area = list(filter(lambda c: c[1] > 1000, contour_area)) # Eliminate contours that are too small
if len(contour_area) > 0:
for contour, area in contour_area: # Loop through all the contours found
rect = cv2.minAreaRect(contour)
center_x, center_y = rect[0]
box = cv2.boxPoints(rect) # The four vertices of the minimum-area-rectangle
box_list = box.tolist()
box = np.int0(box)
cv2.drawContours(img, [box], -1, hiwonder.COLORS[color_name.upper()], 2)
cv2.circle(img, (int(center_x), int(center_y)), 1, hiwonder.COLORS[color_name.upper()], 5)
angle = rect[2]
angle = angle - 90 if angle > 45 else angle
s = "Angle:{:0.2f}deg".format(angle)
cv2.putText(img, s, (int(center_x), int(center_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
print(s)
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
print("\n")
return img
def image_callback(ros_image):
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = np.copy(image)
frame_result = image_proc(frame_result)
image_bgr = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow("result", image_bgr)
cv2.waitKey(1)
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
state = State()
state.load_camera_params()
if state.camera_params is None:
rospy.logerr("Can not load camera params")
sys.exit(-1)
jetmax = hiwonder.JetMax()
jetmax.go_home()
rospy.sleep(1)
state.target_colors = rospy.get_param('/lab_config_manager/color_range_list', {})
del state.target_colors['white']
del state.target_colors['black']
del state.target_colors['ball']
del state.target_colors['tennis']
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
try:
rospy.spin()
except KeyboardInterrupt:
sys.exit(0)


@ -0,0 +1,60 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import numpy as np
from sensor_msgs.msg import Image
import hiwonder
import rospy
ROS_NODE_NAME = "color_detect"
def image_proc(img):
img_h, img_w = img.shape[:2]
frame_gb = cv2.GaussianBlur(np.copy(img), (5, 5), 5)
frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_RGB2LAB) # convert RGB to LAB
blocks = []
for color_name, color in target_colors.items(): # iterate over all color thresholds
frame_mask = cv2.inRange(frame_lab, tuple(color['min']), tuple(color['max']))
eroded = cv2.erode(frame_mask, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour_area = map(lambda c: (c, math.fabs(cv2.contourArea(c))), contours)
contour_area = list(filter(lambda c: c[1] > 1200, contour_area)) # discard blobs that are too small
if len(contour_area) > 0:
for contour, area in contour_area: # Loop through all the contours found
(center_x, center_y), r = cv2.minEnclosingCircle(contour)
cv2.circle(img, (int(center_x), int(center_y)), 1, hiwonder.COLORS[color_name.upper()], 5)
cv2.circle(img, (int(center_x), int(center_y)), int(r), hiwonder.COLORS[color_name.upper()], 2)
cv2.putText(img, color_name.upper(), (int(center_x), int(center_y)), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255), 2)
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_callback(ros_image):
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = np.copy(image)
frame_result = image_proc(frame_result)
image_bgr = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow("result", image_bgr)
cv2.waitKey(1)
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax = hiwonder.JetMax()
jetmax.go_home()
rospy.sleep(1)
target_colors = rospy.get_param('/lab_config_manager/color_range_list', {})
del target_colors['white']
del target_colors['black']
image_sub = rospy.Subscriber("/usb_cam/image_rect_color", Image, image_callback, queue_size=1)
try:
rospy.spin()
except KeyboardInterrupt:
sys.exit(0)


@ -0,0 +1,193 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import threading
import numpy as np
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Bool
from std_srvs.srv import Empty
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from color_sorting.srv import SetTarget, SetTargetResponse, SetTargetRequest
from std_msgs.msg import Bool
from jetmax_control.msg import SetServo
import hiwonder
ROS_NODE_NAME = "color_pos"
IMAGE_PROC_SIZE = 640, 480
unit_block_corners = np.asarray([[0, 0, 0],
[20, -20, 0], # ±20 mm: corners of the 40 mm square block face
[-20, -20, 0],
[-20, 20, 0],
[20, 20, 0]],
dtype=np.float64)
unit_block_img_pts = None
def camera_to_world(cam_mtx, r, t, img_points):
inv_k = np.asmatrix(cam_mtx).I
r_mat = np.zeros((3, 3), dtype=np.float64)
cv2.Rodrigues(r, r_mat)
# invR * T
inv_r = np.asmatrix(r_mat).I # 3*3
transPlaneToCam = np.dot(inv_r, np.asmatrix(t)) # 3*3 dot 3*1 = 3*1
world_pt = []
coords = np.zeros((3, 1), dtype=np.float64)
for img_pt in img_points:
coords[0][0] = img_pt[0][0]
coords[1][0] = img_pt[0][1]
coords[2][0] = 1.0
worldPtCam = np.dot(inv_k, coords) # 3*3 dot 3*1 = 3*1
# [x,y,1] * invR
worldPtPlane = np.dot(inv_r, worldPtCam) # 3*3 dot 3*1 = 3*1
# zc
scale = transPlaneToCam[2][0] / worldPtPlane[2][0]
# zc * [x,y,1] * invR
scale_worldPtPlane = np.multiply(scale, worldPtPlane)
# [X,Y,Z]=zc*[x,y,1]*invR - invR*T
worldPtPlaneReproject = np.asmatrix(scale_worldPtPlane) - np.asmatrix(transPlaneToCam) # 3*1 dot 1*3 = 3*3
pt = np.zeros((3, 1), dtype=np.float64)
pt[0][0] = worldPtPlaneReproject[0][0]
pt[1][0] = worldPtPlaneReproject[1][0]
pt[2][0] = 0
world_pt.append(pt.T.tolist())
return world_pt
class State:
def __init__(self):
self.target_colors = {}
self.runner = None
self.camera_params = None
self.K = None
self.R = None
self.T = None
self.WIDTH = None
def load_camera_params(self):
global unit_block_img_pts
self.camera_params = rospy.get_param('/camera_cal/block_params', self.camera_params)
if self.camera_params is not None:
self.K = np.array(self.camera_params['K'], dtype=np.float64).reshape(3, 3)
self.R = np.array(self.camera_params['R'], dtype=np.float64).reshape(3, 1)
self.T = np.array(self.camera_params['T'], dtype=np.float64).reshape(3, 1)
img_pts, jac = cv2.projectPoints(unit_block_corners, self.R, self.T, self.K, None)
unit_block_img_pts = img_pts.reshape(5, 2)
l_p1 = unit_block_img_pts[-1]
l_p2 = unit_block_img_pts[-2]
self.WIDTH = math.sqrt((l_p1[0] - l_p2[0]) ** 2 + (l_p1[1] - l_p2[1]) ** 2)
print(unit_block_img_pts)
def point_xy(pt_a, pt_b, r):
x_a, y_a = pt_a
x_b, y_b = pt_b
if x_a == x_b:
return x_a, y_a + (r / abs((y_b - y_a))) * (y_b - y_a)
k = (y_a - y_b) / (x_a - x_b)
b = y_a - k * x_a
A = k ** 2 + 1
B = 2 * ((b - y_a) * k - x_a)
C = (b - y_a) ** 2 + x_a ** 2 - r ** 2
x1 = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
x2 = (-B - math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
y1 = k * x1 + b
y2 = k * x2 + b
dist_1 = math.sqrt((x1 - x_b) ** 2 + (y1 - y_b) ** 2)
dist_2 = math.sqrt((x2 - x_b) ** 2 + (y2 - y_b) ** 2)
if dist_1 <= dist_2:
return x1, y1
else:
return x2, y2
def image_proc(img):
img_h, img_w = img.shape[:2]
frame_gb = cv2.GaussianBlur(np.copy(img), (5, 5), 5)
frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_RGB2LAB) # Convert rgb to lab
blocks = []
for color_name, color in state.target_colors.items(): # Loop through all selected colors
frame_mask = cv2.inRange(frame_lab, tuple(color['min']), tuple(color['max']))
eroded = cv2.erode(frame_mask, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour_area = map(lambda c: (c, math.fabs(cv2.contourArea(c))), contours)
contour_area = list(filter(lambda c: c[1] > 1000, contour_area)) # Eliminate contours that are too small
if len(contour_area) > 0:
for contour, area in contour_area: # Loop through all the contours found
rect = cv2.minAreaRect(contour)
center_x, center_y = rect[0]
box = cv2.boxPoints(rect) # The four vertices of the minimum-area-rectangle
box_list = box.tolist()
box = np.int0(box)
ap = max(box_list, key=lambda p: math.sqrt((p[0] - state.K[0][2]) ** 2 + (p[1] - state.K[1][2]) ** 2))
index_ap = box_list.index(ap)
p1 = box_list[index_ap - 1 if index_ap - 1 >= 0 else 3]
p2 = box_list[index_ap + 1 if index_ap + 1 <= 3 else 0]
n_p1 = point_xy(ap, p1, state.WIDTH)
n_p2 = point_xy(ap, p2, state.WIDTH)
c_x, c_y = None, None
if n_p1 and n_p2:
x_1, y_1 = n_p1
x_2, y_2 = n_p2
c_x = (x_1 + x_2) / 2
c_y = (y_1 + y_2) / 2
cv2.circle(img, (int(n_p1[0]), int(n_p1[1])), 2, (255, 255, 0), 10)
cv2.circle(img, (int(n_p2[0]), int(n_p2[1])), 2, (255, 255, 0), 10)
cv2.circle(img, (int(c_x), int(c_y)), 2, (0, 0, 0), 10)
cv2.circle(img, (int(ap[0]), int(ap[1])), 2, (0, 255, 255), 10)
cv2.drawContours(img, [box], -1, hiwonder.COLORS[color_name.upper()], 2)
cv2.circle(img, (int(center_x), int(center_y)), 1, hiwonder.COLORS[color_name.upper()], 5)
rect = list(rect)
if c_x:
rect[0] = c_x, c_y
else:
rect[0] = (center_x, center_y)
x, y = rect[0]
cv2.circle(img, (int(x), int(y)), 1, (255, 255, 255), 5)
real_x, real_y, _ = camera_to_world(state.K, state.R, state.T, np.array((x, y)).reshape((1, 1, 2)))[0][0]
s = "{} x:{:0.2f} y:{:0.2f}".format(color_name.upper(), real_x, real_y)
print(s)
cv2.putText(img, s, (int(center_x), int(center_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
print("\n")
return img
def image_callback(ros_image):
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = np.copy(image)
frame_result = image_proc(frame_result)
image_bgr = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow("result", image_bgr)
cv2.waitKey(1)
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
state = State()
state.load_camera_params()
if state.camera_params is None:
rospy.logerr("Can not load camera params")
sys.exit(-1)
jetmax = hiwonder.JetMax()
jetmax.go_home()
rospy.sleep(1)
state.target_colors = rospy.get_param('/lab_config_manager/color_range_list', {})
del state.target_colors['white']
del state.target_colors['black']
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
try:
rospy.spin()
except KeyboardInterrupt:
sys.exit(0)

View File

@ -0,0 +1,88 @@
#!/usr/bin/env python3
import sys
import math
import rospy
import time
import queue
import threading
import cv2
import numpy as np
import hiwonder
from sensor_msgs.msg import Image as RosImage
from PIL import Image
import torch
from facenet_pytorch import MTCNN
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
mtcnn = MTCNN(keep_all=True, min_face_size=50, factor=0.709, post_process=False, device=device)
ROS_NODE_NAME = 'face_expression'
jetmax = hiwonder.JetMax()
image_queue = queue.Queue(maxsize=1)
def show_faces(img, boxes, landmarks, color):
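"""Scale detections from the 160x160 network input back to the 640x480 frame and draw the boxes and landmarks."""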
new_boxes = []
new_landmarks = []
for bb, ll in zip(boxes, landmarks):
x1 = int(hiwonder.misc.val_map(bb[0], 0, 160, 0, 640))
y1 = int(hiwonder.misc.val_map(bb[1], 0, 160, 0, 480))
x2 = int(hiwonder.misc.val_map(bb[2], 0, 160, 0, 640))
y2 = int(hiwonder.misc.val_map(bb[3], 0, 160, 0, 480))
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
new_boxes.append([x1, y1, x2, y2])
landmarks_curr_face = []
if len(landmarks):
for j in range(5):
lx = int(hiwonder.misc.val_map(ll[j][0], 0, 160, 0, 640))
ly = int(hiwonder.misc.val_map(ll[j][1], 0, 160, 0, 480))
cv2.circle(img, (lx, ly), 2, color, 2)
landmarks_curr_face.append([lx, ly])
new_landmarks.append(landmarks_curr_face)
return img, new_boxes, new_landmarks
def image_proc_a(image):
img_ret = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
org_img1 = np.copy(image)
image = cv2.resize(image, (160, 160))
boxes, _, landmarks = mtcnn.detect(Image.fromarray(image), landmarks=True)
if boxes is None:
return img_ret
boxes = list(boxes)
landmarks = list(landmarks)
img_ret, boxes, landmarks = show_faces(img_ret, boxes, landmarks, (0, 255, 0))
return img_ret
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
image = image_proc_a(image)
toc = time.time()
cv2.imshow(ROS_NODE_NAME, image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax.go_home(1)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', RosImage, image_callback)
try:
while not rospy.is_shutdown():
image_proc_b()
except Exception as e:
rospy.logerr(e)
print(e.__traceback__.tb_lineno)
sys.exit()

View File

@ -0,0 +1,15 @@
#!/bin/bash
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep alphabetically|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop alphabetically.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep waste_classification|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop waste_classification.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep object_tracking|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop object_tracking.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep color_sorting|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop color_sorting.service

View File

@ -0,0 +1,95 @@
#!/usr/bin/env python3
import os
import sys
import cv2
import math
import time
import queue
import random
import threading
import numpy as np
import rospy
from sensor_msgs.msg import Image
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
import hiwonder
from hiwonder import serial_servo as ss
from yolov5_tensorrt import Yolov5TensorRT
ROS_NODE_NAME = "hiwjfkalsdjfkla"
IMAGE_SIZE = 640, 480
CHARACTERS_ENGINE_PATH = os.path.join(sys.path[0], 'models/characters_v5_160.trt')
CHARACTER_LABELS = tuple([i for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
CHARACTER_NUM = 26
TRT_INPUT_SIZE = 160
COLORS = tuple([tuple([random.randint(10, 255) for j in range(3)]) for i in range(CHARACTER_NUM)])
TARGET_POSITION = (-200, -180, 65)
yolov5_chars = Yolov5TensorRT(CHARACTERS_ENGINE_PATH, TRT_INPUT_SIZE, CHARACTER_NUM)
def image_proc(img_in):
result_image = cv2.cvtColor(img_in, cv2.COLOR_RGB2BGR)
outputs = yolov5_chars.detect(np.copy(img_in))
boxes, confs, classes = yolov5_chars.post_process(img_in, outputs, 0.60)
for box, cls_id, cls_conf in zip(boxes, classes, confs):
x1 = box[0] / TRT_INPUT_SIZE * 640
y1 = box[1] / TRT_INPUT_SIZE * 480
x2 = box[2] / TRT_INPUT_SIZE * 640
y2 = box[3] / TRT_INPUT_SIZE * 480
char = CHARACTER_LABELS[cls_id]
cv2.putText(result_image, char + " " + str(float(cls_conf))[:4], (int(x1), int(y1) - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLORS[cls_id], 2)
cv2.rectangle(result_image, (int(x1), int(y1)), (int(x2), int(y2)), COLORS[cls_id], 3)
print(x1, y1, x2, y2, char)
return result_image
def show_fps(img=None):
"""Draw the FPS number at the top-left corner of the image."""
global fps, fps_t0
# fps calculation
fps_t1 = time.time()
fps_cur = (1.0 / (fps_t1 - fps_t0))
fps = fps_cur if fps == 0.0 else (fps * 0.8 + fps_cur * 0.2)
fps_t0 = fps_t1
font = cv2.FONT_HERSHEY_PLAIN
line = cv2.LINE_AA
fps_text = 'FPS: {:.2f}'.format(fps)
if img is not None:
cv2.putText(img, fps_text, (11, 20), font, 1.0, (32, 32, 32), 4, line)
cv2.putText(img, fps_text, (10, 20), font, 1.0, (240, 240, 240), 1, line)
return img
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
result_img = image_proc(image)
show_fps(result_img)
result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
cv2.imshow(ROS_NODE_NAME, result_img)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
fps_t0 = time.time()
fps = 0
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
image_queue = queue.Queue(maxsize=1)
jetmax = hiwonder.JetMax()
jetmax.go_home()
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)  # Subscribe to the camera feed
while not rospy.is_shutdown():
image_proc_b()

View File

@ -0,0 +1 @@
/home/hiwonder/models

View File

@ -0,0 +1,62 @@
#!/usr/bin/env python3
import os
import sys
import cv2
import math
import rospy
import numpy as np
from sensor_msgs.msg import Image
import queue
import hiwonder
from yolov5_tensorrt import Yolov5TensorRT
import random
import threading
ROS_NODE_NAME = "virtual_calculator"
TRT_ENGINE_PATH = os.path.join(sys.path[0], "models/numbers_v5_160.trt")
TRT_INPUT_SIZE = 160
TRT_CLASS_NAMES = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '-', '*', '/', '=')
TRT_NUM_CLASSES = 15
COLORS = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in range(TRT_NUM_CLASSES)]
def image_proc_a():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
outputs = yolov5.detect(image)
boxes, confs, classes = yolov5.post_process(image, outputs, 0.6)
width = image.shape[1]
height = image.shape[0]
for box, cls_conf, cls_id in zip(boxes, confs, classes):
x1 = int(box[0] / TRT_INPUT_SIZE * width)
y1 = int(box[1] / TRT_INPUT_SIZE * height)
x2 = int(box[2] / TRT_INPUT_SIZE * width)
y2 = int(box[3] / TRT_INPUT_SIZE * height)
card_name = str(TRT_CLASS_NAMES[cls_id])
cv2.putText(image, card_name + " " + str(float(cls_conf))[:4], (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX,
0.7, COLORS[cls_id], 2)
cv2.rectangle(image, (x1, y1), (x2, y2), COLORS[cls_id], 2)
print(x1, y1, x2, y2, card_name)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow(ROS_NODE_NAME, image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
yolov5 = Yolov5TensorRT(TRT_ENGINE_PATH, TRT_INPUT_SIZE, TRT_NUM_CLASSES)
jetmax = hiwonder.JetMax()
jetmax.go_home(2)
rospy.sleep(2)
image_queue = queue.Queue(maxsize=1)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback, queue_size=1) # Subscribe to the camera
while not rospy.is_shutdown():
image_proc_a()

View File

@ -0,0 +1,96 @@
#!/usr/bin/env python3
import os
import sys
import cv2
import math
import rospy
import numpy as np
import threading
import queue
import hiwonder
from sensor_msgs.msg import Image
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from std_srvs.srv import Empty
from std_msgs.msg import Bool
from jetmax_control.msg import SetServo
from yolov5_tensorrt import Yolov5TensorRT
ROS_NODE_NAME = "waste_classification_1"
TRT_ENGINE_PATH = os.path.join(sys.path[0], "models/waste_v5_160.trt")
TRT_INPUT_SIZE = 160
TRT_CLASS_NAMES = ('Banana Peel', 'Broken Bones', 'Cigarette End', 'Disposable Chopsticks',
'Ketchup', 'Marker', 'Oral Liquid Bottle', 'Plate',
'Plastic Bottle', 'Storage Battery', 'Toothbrush', 'Umbrella')
TRT_NUM_CLASSES = 12
WASTE_CLASSES = {
'food_waste': ('Banana Peel', 'Broken Bones', 'Ketchup'),
'hazardous_waste': ('Marker', 'Oral Liquid Bottle', 'Storage Battery'),
'recyclable_waste': ('Plastic Bottle', 'Toothbrush', 'Umbrella'),
'residual_waste': ('Plate', 'Cigarette End', 'Disposable Chopsticks'),
}
COLORS = {
'recyclable_waste': (0, 0, 255),
'hazardous_waste': (255, 0, 0),
'food_waste': (0, 255, 0),
'residual_waste': (80, 80, 80)
}
TARGET_POSITION = {
'recyclable_waste': (170, -65, 65, 65),
'hazardous_waste': (170, -15, 65, 85),
'food_waste': (170, 35, 65, 100),
'residual_waste': (170, 85, 65, 118)
}
def image_proc():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
outputs = yolov5.detect(image)
boxes, confs, classes = yolov5.post_process(image, outputs, 0.65)
width = image.shape[1]
height = image.shape[0]
cards = []
for box, cls_conf, cls_id in zip(boxes, confs, classes):
x1 = int(box[0] / TRT_INPUT_SIZE * width)
y1 = int(box[1] / TRT_INPUT_SIZE * height)
x2 = int(box[2] / TRT_INPUT_SIZE * width)
y2 = int(box[3] / TRT_INPUT_SIZE * height)
waste_name = TRT_CLASS_NAMES[cls_id]
waste_class_name = ''
for k, v in WASTE_CLASSES.items():
if waste_name in v:
waste_class_name = k
break
cards.append((cls_conf, x1, y1, x2, y2, waste_class_name))
cv2.putText(image, waste_class_name, (x1, y1 - 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLORS[waste_class_name], 2)
cv2.putText(image, waste_name + "{:0.2f}".format(float(cls_conf)), (x1, y1 - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, COLORS[waste_class_name], 2)
cv2.rectangle(image, (x1, y1), (x2, y2), COLORS[waste_class_name], 3)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('result', image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
image_queue = queue.Queue(maxsize=2)
jetmax = hiwonder.JetMax()
jetmax.go_home(2)
rospy.sleep(2)
yolov5 = Yolov5TensorRT(TRT_ENGINE_PATH, TRT_INPUT_SIZE, TRT_NUM_CLASSES)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback)
while not rospy.is_shutdown():
image_proc()

View File

@ -0,0 +1,207 @@
import cv2
import sys
import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
import numpy as np
import math
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem:
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def sigmoid_v(array):
return np.reciprocal(np.exp(-array) + 1.0)
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def non_max_suppression(boxes, confs, classes, iou_thres=0.6):
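"""Greedy IoU-based non-maximum suppression over xyxy boxes: repeatedly keep the
highest-confidence box and drop all boxes whose IoU with it exceeds iou_thres."""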
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = confs.flatten().argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= iou_thres)[0]
order = order[inds + 1]
boxes = boxes[keep]
confs = confs[keep]
classes = classes[keep]
return boxes, confs, classes
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def nms(pred, iou_thres=0.4):
boxes = xywh2xyxy(pred[..., 0:4])
# best class only
confs = np.amax(pred[:, 5:], 1, keepdims=True)
classes = np.argmax(pred[:, 5:], axis=-1)
return non_max_suppression(boxes, confs, classes, iou_thres)
def make_grid(nx, ny):
"""
Create scaling tensor based on box location
Source: https://github.com/ultralytics/yolov5/blob/master/models/yolo.py
Arguments
nx: x-axis num boxes
ny: y-axis num boxes
Returns
grid: array of shape (1, 1, ny, nx, 2)
"""
nx_vec = np.arange(nx)
ny_vec = np.arange(ny)
yv, xv = np.meshgrid(ny_vec, nx_vec)
grid = np.stack((yv, xv), axis=2)
grid = grid.reshape(1, 1, ny, nx, 2)
return grid
def pre_process(img_in, w, h):
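"""Resize to the network input size, reorder HWC -> CHW, scale to [0, 1], and add a batch dimension."""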
img_in = cv2.resize(img_in, (w, h), interpolation=cv2.INTER_LINEAR)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.transpose((2, 0, 1)).astype(np.float16)
img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
img_in = np.expand_dims(img_in, axis=0)
img_in /= 255.0
img_in = np.ascontiguousarray(img_in)
return img_in
class Yolov5TensorRT:
def __init__(self, model, input_size, classes_num):
# load tensorrt engine
self.input_size = input_size
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
with open(model, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
self.context = engine.create_execution_context()
# allocate memory
inputs, outputs, bindings = [], [], []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
bindings.append(int(device_mem))
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
# save to class
self.inputs = inputs
self.outputs = outputs
self.bindings = bindings
self.stream = stream
# post processing config
self.strides = np.array([8., 16., 32.])
anchors = np.array([
[[10, 13], [16, 30], [33, 23]],
[[30, 61], [62, 45], [59, 119]],
[[116, 90], [156, 198], [373, 326]],
])
self.nl = len(anchors)
self.nc = classes_num # classes
self.no = self.nc + 5 # outputs per anchor
self.na = len(anchors[0])
a = anchors.copy().astype(np.float32)
a = a.reshape(self.nl, -1, 2)
self.anchors = a.copy()
self.anchor_grid = a.copy().reshape(self.nl, 1, -1, 1, 1, 2)
self.output_shapes = [
(1, 3, int(input_size / 8), int(input_size / 8), self.nc + 5),
(1, 3, int(input_size / 16), int(input_size / 16), self.nc + 5),
(1, 3, int(input_size / 32), int(input_size / 32), self.nc + 5)
]
def detect(self, img):
shape_orig_WH = (img.shape[1], img.shape[0])
resized = pre_process(img, self.input_size, self.input_size)
outputs = self.inference(resized)
# reshape from flat to (1, 3, x, y, 85)
reshaped = []
for output, shape in zip(outputs, self.output_shapes):
reshaped.append(output.reshape(shape))
return reshaped
def inference(self, img):
# copy img to input memory
# self.inputs[0]['host'] = np.ascontiguousarray(img)
self.inputs[0].host = np.ravel(img)
# transfer data to the gpu
[cuda.memcpy_htod_async(inp.device, inp.host, self.stream) for inp in self.inputs]
# run inference
self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
# fetch outputs from gpu
[cuda.memcpy_dtoh_async(out.host, out.device, self.stream) for out in self.outputs]
# synchronize stream
self.stream.synchronize()
return [out.host for out in self.outputs]
def post_process(self, image, outputs, conf_thres=0.2):
"""
Transforms raw output into boxes, confs, classes
Applies NMS thresholding on bounding boxes and confs
Parameters:
output: raw output tensor
Returns:
boxes: x1,y1,x2,y2 tensor (dets, 4)
confs: class * obj prob tensor (dets, 1)
classes: class type tensor (dets, 1)
"""
scaled = []
grids = []
for out in outputs:
out = sigmoid_v(out)
_, _, width, height, _ = out.shape
grid = make_grid(width, height)
grids.append(grid)
scaled.append(out)
z = []
for out, grid, stride, anchor in zip(scaled, grids, self.strides, self.anchor_grid):
_, _, width, height, _ = out.shape
out[..., 0:2] = (out[..., 0:2] * 2. - 0.5 + grid) * stride
out[..., 2:4] = (out[..., 2:4] * 2) ** 2 * anchor
out = out.reshape((1, 3 * width * height, self.no))
z.append(out)
pred = np.concatenate(z, 1)
xc = pred[..., 4] > conf_thres
pred = pred[xc]
return nms(pred)
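# Minimal usage sketch (the engine path, input size, and class count here are assumptions):
# yolo = Yolov5TensorRT('models/example_160.trt', 160, 26)
# outputs = yolo.detect(rgb_image)  # rgb_image: HxWx3 uint8 RGB frame
# boxes, confs, classes = yolo.post_process(rgb_image, outputs, conf_thres=0.6)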

69
src/CMakeLists.txt Normal file
View File

@ -0,0 +1,69 @@
# toplevel CMakeLists.txt for a catkin workspace
# catkin/cmake/toplevel.cmake
cmake_minimum_required(VERSION 3.0.2)
project(Project)
set(CATKIN_TOPLEVEL TRUE)
# search for catkin within the workspace
set(_cmd "catkin_find_pkg" "catkin" "${CMAKE_SOURCE_DIR}")
execute_process(COMMAND ${_cmd}
RESULT_VARIABLE _res
OUTPUT_VARIABLE _out
ERROR_VARIABLE _err
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_STRIP_TRAILING_WHITESPACE
)
if(NOT _res EQUAL 0 AND NOT _res EQUAL 2)
# searching for catkin resulted in an error
string(REPLACE ";" " " _cmd_str "${_cmd}")
message(FATAL_ERROR "Search for 'catkin' in workspace failed (${_cmd_str}): ${_err}")
endif()
# include catkin from workspace or via find_package()
if(_res EQUAL 0)
set(catkin_EXTRAS_DIR "${CMAKE_SOURCE_DIR}/${_out}/cmake")
# include all.cmake without add_subdirectory to let it operate in same scope
include(${catkin_EXTRAS_DIR}/all.cmake NO_POLICY_SCOPE)
add_subdirectory("${_out}")
else()
# use either CMAKE_PREFIX_PATH explicitly passed to CMake as a command line argument
# or CMAKE_PREFIX_PATH from the environment
if(NOT DEFINED CMAKE_PREFIX_PATH)
if(NOT "$ENV{CMAKE_PREFIX_PATH}" STREQUAL "")
if(NOT WIN32)
string(REPLACE ":" ";" CMAKE_PREFIX_PATH $ENV{CMAKE_PREFIX_PATH})
else()
set(CMAKE_PREFIX_PATH $ENV{CMAKE_PREFIX_PATH})
endif()
endif()
endif()
# list of catkin workspaces
set(catkin_search_path "")
foreach(path ${CMAKE_PREFIX_PATH})
if(EXISTS "${path}/.catkin")
list(FIND catkin_search_path ${path} _index)
if(_index EQUAL -1)
list(APPEND catkin_search_path ${path})
endif()
endif()
endforeach()
# search for catkin in all workspaces
set(CATKIN_TOPLEVEL_FIND_PACKAGE TRUE)
find_package(catkin QUIET
NO_POLICY_SCOPE
PATHS ${catkin_search_path}
NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
unset(CATKIN_TOPLEVEL_FIND_PACKAGE)
if(NOT catkin_FOUND)
message(FATAL_ERROR "find_package(catkin) failed. catkin was neither found in the workspace nor in the CMAKE_PREFIX_PATH. One reason may be that no ROS setup.sh was sourced before.")
endif()
endif()
catkin_workspace()

203
src/Sensor/CMakeLists.txt Normal file
View File

@ -0,0 +1,203 @@
cmake_minimum_required(VERSION 3.0.2)
project(Sensor)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs # Or other packages containing msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES Sensor
# CATKIN_DEPENDS python
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/Sensor.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/Sensor_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_Sensor.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

59
src/Sensor/package.xml Normal file
View File

@ -0,0 +1,59 @@
<?xml version="1.0"?>
<package format="2">
<name>Sensor</name>
<version>0.0.0</version>
<description>The Sensor package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="hiwonder@todo.todo">hiwonder</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/Sensor</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

1
src/Sensor/scripts/FER Symbolic link
View File

@ -0,0 +1 @@
/home/hiwonder/ros/src/jetmax_demos/scripts/FER

@ -0,0 +1 @@
Subproject commit cc2fe1bfae20fe2bcb9abfb6c3062af0b995b876

View File

@ -0,0 +1,38 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
jetmax上的舵机的位置数值范围为01000, 对应0240
需要注意 舵机的位置数值和时间数值都需要用整数
"""
def main():
hiwonder.serial_servo.set_position(1, 600, 2000)  # move servo ID 1 to position 600 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(1, 300, 2000)  # move servo ID 1 to position 300 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(1, 500, 2000)  # move servo ID 1 to position 500 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(2, 600, 2000)  # move servo ID 2 to position 600 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(2, 300, 2000)  # move servo ID 2 to position 300 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(2, 500, 2000)  # move servo ID 2 to position 500 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(3, 300, 2000)  # move servo ID 3 to position 300 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(3, 600, 2000)  # move servo ID 3 to position 600 over 2000 ms
time.sleep(1)
hiwonder.serial_servo.set_position(3, 500, 2000)  # move servo ID 3 to position 500 over 2000 ms
time.sleep(1)
hiwonder.pwm_servo1.set_position(135, 1)  # move PWM servo 1 to 135 degrees in 1 s
time.sleep(1)
hiwonder.pwm_servo1.set_position(45, 1)  # move PWM servo 1 to 45 degrees in 1 s
time.sleep(1)
hiwonder.pwm_servo1.set_position(90, 1)  # move PWM servo 1 to 90 degrees in 1 s
time.sleep(1)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,70 @@
import sys
import time
import tty
import termios
import hiwonder
# jetmax is the robot-arm control module; sucker is the suction-cup end effector
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
# Initial position
position = [-100, -265.94, 80]
step = 5  # step size of each move
time_to_move = 0.1  # duration of each move, in seconds
# Send the arm to the given position
def set_position(position, time_to_move):
print(f"Setting position to {position} in {time_to_move} seconds")
# Call the arm's positioning interface here
jetmax.set_position(tuple(position), time_to_move)
# Read a single character from stdin without waiting for Enter
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
print("Use WASD to move in the XY plane, R and F to move along Z. Press 'q' to quit.")
try:
while True:
char = getch()
if char == 'w':
position[1] += step  # +Y
elif char == 's':
position[1] -= step  # -Y
elif char == 'a':
position[0] -= step  # -X
elif char == 'd':
position[0] += step  # +X
elif char == 'r':
position[2] += step  # +Z
elif char == 'f':
position[2] -= step  # -Z
elif char == 'q':
break
set_position(position, time_to_move)
time.sleep(0.1)
except KeyboardInterrupt:
print("Program interrupted.")
finally:
print("Program exited.")

View File

@ -0,0 +1,28 @@
import hiwonder
import time
from PixelToRealworld.predefinedconstants import *
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
jetmax.go_home(1)
time.sleep(1)
ort_point = (0, -162.94, 212.8 - 28)
new_point = ort_point
new_point = (new_point[0] , new_point[1] - 80, new_point[2] - 130)
ele_point = (-86, -115.94, 212.8)
# px, py = input("Enter pixel coordinates: ").split()
# px, py = int(px), int(py)
target = pixel2robot(300, 165)[0]
# print(target)
# jetmax.set_position((target[0], target[1], 70), 1)
# jetmax.set_position((-75.41654829, -156.86319459, 70), 1)
jetmax.set_position(ort_point, 1)
time.sleep(1)
print(jetmax.position)

View File

@ -0,0 +1,141 @@
import time
import hiwonder
from PixelToRealworld.predefinedconstants import *
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
pre_x, pre_y, pre_z = 123, -195, 65
ori_x, ori_y, ori_z = 0, -212, 60
stack_id = 0
STACK_P_X, STACK_P_Y = 125, -175.94
FIRST_Z = 70
class Stackbot:
def __init__(self):
self.stack_xyz = [STACK_P_X, STACK_P_Y]
self.ori_x, self.ori_y, self.ori_z = 0, -212, 60
self.first_z = 70
self.stack_pick_angle = 0
self.current_level = 0
def pick_stack(self, stack_id):
stack_xyz = [STACK_P_X, STACK_P_Y]
jetmax.go_home(1)
time.sleep(1)
hiwonder.pwm_servo1.set_position(180, 0.1)
time.sleep(0.5)
jetmax.set_position((stack_xyz[0], stack_xyz[1], FIRST_Z + 100), 1)
time.sleep(2)
sucker.suck()
jetmax.set_position((stack_xyz[0], stack_xyz[1], FIRST_Z - 5), 2)
time.sleep(3)
self.stack_pick_angle = hiwonder.serial_servo.read_position(1)
jetmax.set_position((stack_xyz[0], stack_xyz[1], pre_z + 100), 1)
time.sleep(1)
jetmax.go_home(1)
time.sleep(1)
# current_angle = hiwonder.serial_servo.read_position(1)
# hiwonder.pwm_servo1.set_position(180 + (current_angle-self.stack_pick_angle) * 0.25 , 0.1)
# time.sleep(1)
def place_stack(self, stack_id, px, py, theta, level):
stack_xyz = pixel2robot(px, py)[0]
stack_xyz = (stack_xyz[0], stack_xyz[1], FIRST_Z + level * 12)
jetmax.set_position((stack_xyz[0], stack_xyz[1], stack_xyz[2] + 100), 1)
time.sleep(1)
current_angle = hiwonder.serial_servo.read_position(1)
# hiwonder.pwm_servo1.set_position(180 + (current_angle-self.stack_pick_angle) * 0.25 - theta, 0.1)
print(current_angle, self.stack_pick_angle)
time.sleep(1)
if stack_id == 6:
hiwonder.pwm_servo1.set_position(180 + (current_angle-self.stack_pick_angle) * 0.25 + 30, 0.5)
else:
hiwonder.pwm_servo1.set_position(180 + (current_angle-self.stack_pick_angle) * 0.25 - theta, 0.5)
time.sleep(1)
jetmax.set_position((stack_xyz[0], stack_xyz[1], stack_xyz[2] + 10), 2)
time.sleep(2)
sucker.release()
if __name__ == '__main__':
# jetmax.go_home(1)
# time.sleep(1)
# jetmax.set_position((ori_x, ori_y, 200), 1)
# hiwonder.pwm_servo1.set_position(180, 0.1)
# pick_stack(1)
sucker.release()
stackbot = Stackbot()
stackbot.pick_stack(1)
stackbot.place_stack(1, 379, 209, 30, 0)
stackbot.pick_stack(2)
stackbot.place_stack(2, 172, 217, 150, 0)
stackbot.pick_stack(3)
stackbot.place_stack(3, 300, 430, 90, 0)
stackbot.pick_stack(4)
stackbot.place_stack(4, 274, 120, 90, 1)
stackbot.pick_stack(5)
stackbot.place_stack(5, 180, 373, 30, 1)
stackbot.pick_stack(6)
stackbot.place_stack(6, 412, 333, 150, 1)
# while True:
# ori_z += 16
# pick_stack(stack_id)
# jetmax.set_position((ori_x, ori_y, ori_z), 1)
# time.sleep(1)
# sucker.release()
# time.sleep(2)
# stack_id += 1
# while True:
# jetmax.set_position((cur_x-100, cur_y, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x+100, cur_y, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y-50, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y+50, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y, cur_z), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y, cur_z-100), 1)
# time.sleep(2)
# jetmax.set_position((cur_x, cur_y, cur_z), 0.5)
# time.sleep(2)

127
src/Sensor/scripts/asr.py Executable file
View File

@ -0,0 +1,127 @@
#!/usr/bin/env python3
# coding=utf8
'''
* Only Chinese words can be recognized; convert each word to pinyin with spaces between
* syllables, e.g. 幻尔科技 --> huan er ke ji.
* Up to 50 entries can be added; each entry is at most 79 characters and at most 10 syllables.
* Each entry maps to a recognition ID (1-255, chosen freely); different entries may share an ID,
* e.g. "huan er ke ji" and "huan er" can both be assigned the same ID.
* The STA LED on the module is lit while it is listening and off otherwise. When speech is
* recognized the LED dims or blinks until the result is read, after which the normal indication resumes.
'''
import smbus2
import time
# Hiwonder speech-recognition module example
class ASR:
# Global Variables
address = 0x79
bus = None
ASR_RESULT_ADDR = 100
# Recognition result register; poll this address to check for recognized speech
# (different values correspond to different word entries)
ASR_WORDS_ERASE_ADDR = 101
# Erase all word entries
ASR_MODE_ADDR = 102
# Recognition mode, valid range 1-3:
# 1: continuous recognition (status LED always on; the default)
# 2: password mode, the first entry acts as a wake word (LED off; it lights up when the
#    wake word is heard and goes out once a new result has been read)
# 3: push-to-talk mode, recognition only while the button is held (persists across power
#    cycles; the LED follows the button)
ASR_ADD_WORDS_ADDR = 160
# Register for adding word entries (persists across power cycles)
def __init__(self, bus=1):
self.bus = smbus2.SMBus(bus)
def readByte(self):
try:
result = self.bus.read_byte(self.address)
except:
return None
return result
def writeByte(self, val):
try:
self.bus.write_byte(self.address, val)
except:
return False
return True
def writeData(self, reg, val):
try:
self.bus.write_byte(self.address, reg)
self.bus.write_byte(self.address, val)
except:
pass
def getResult(self):
if not self.writeByte(self.ASR_RESULT_ADDR):
return -1
try:
value = self.bus.read_byte(self.address)
except:
return None
return value
'''
* Add a word entry.
* idNum: recognition ID (1-255, chosen freely). When the entry's speech is recognized,
* the ID is stored at ASR_RESULT_ADDR until the host reads it, after which it is cleared to 0.
* words: the pinyin of the entry, with syllables separated by spaces.
*
* Entries added by this function are appended after the existing ones.
'''
def addWords(self, idNum, words):
buf = [idNum]
for c in words:
buf.append(ord(c))
try:
self.bus.write_i2c_block_data(self.address, self.ASR_ADD_WORDS_ADDR, buf)
except:
pass
time.sleep(0.1)
def eraseWords(self):
try:
self.bus.write_byte_data(self.address, self.ASR_WORDS_ERASE_ADDR, 0)
except:
return False
time.sleep(0.1)
return True
def setMode(self, mode):
try:
self.bus.write_byte_data(self.address, self.ASR_MODE_ADDR, mode)
except:
return False
time.sleep(0.1)
return True
if __name__ == "__main__":
asr = ASR()
# Word entries and the mode persist across power cycles; after the first run you may change this 1 to 0
if 1:
asr.eraseWords()
asr.setMode(2)
asr.addWords(1, 'kai shi')
asr.addWords(2, 'fen jian hong se')
asr.addWords(3, 'fen jian lv se')
asr.addWords(4, 'fen jian lan se')
asr.addWords(5, 'fen jian biao qian yi')
asr.addWords(6, 'fen jian biao qian er')
asr.addWords(7, 'fen jian biao qian san')
asr.addWords(8, 'ting zhi fen jian')
while 1:
data = asr.getResult()
if data:
print("result:", data)
time.sleep(0.01)

View File

@ -0,0 +1,23 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
控制蜂鸣器
"""
def main():
while True:
# The buzzer on the JetMax is an active buzzer: it can only be switched on and off, the pitch is fixed
for i in range(5):
hiwonder.buzzer.on()  # buzzer on
time.sleep(0.1)
hiwonder.buzzer.off()  # buzzer off
time.sleep(0.1)
time.sleep(1)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,89 @@
import numpy as np
import cv2
import pickle
def pixel2robot(p_x, p_y, Zc=260):
"""
将像素坐标转换为机器人坐标系坐标
参数
p_x (int)像素坐标系x轴坐标
p_y (int)像素坐标系y轴坐标
返回值
robot_coords (numpy.ndarray)机器人基坐标系下的坐标
示例
>>> pixel2robot(100, 200)
array([-51.464, -41.334, 774.52])
"""
# Camera intrinsic matrix
camera_matrix = np.array([[9.751303247980034e+02, 0, 9.068156759228305e+02],
[0, 9.771159378738466e+02, 5.153786912293900e+02],
[0, 0, 1]])
# R = np.array([[-0.67004953, -0.74115346, 0.04153518],
# [0.73884813, -0.67127662, -0.05908594],
# [0.07167334, -0.00890232, 0.99738843]])
# print(R)
# T = np.array([0.06106497, -0.06272417, 0.7103077])
npzfile = np.load('camera_params.npz')
R = npzfile['R']
T = npzfile['T']
T = np.reshape(T, (3,))
# 336, 174
u, v = p_x, p_y
# Zc: assumed distance from the camera to the target plane
Xc = (u - camera_matrix[0, 2]) * Zc / camera_matrix[0, 0]
Yc = (v - camera_matrix[1, 2]) * Zc / camera_matrix[1, 1]
# Transform from the camera frame to the calibration-board frame
camera_coords = np.array([Xc, Yc, Zc])
base_coords = np.dot(R, camera_coords) + T
print(base_coords)
taR = np.array([[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]])
taT = np.array([-22.73, -149.07, 82])
# print(np.dot(taR, base_coords))
base_coords = np.dot(taR, base_coords) + taT
return base_coords
camera_matrix = np.array([[9.751303247980034e+02, 0, 9.068156759228305e+02],
[0, 9.771159378738466e+02, 5.153786912293900e+02],
[0, 0, 1]])
dist_coeffs = np.array([0.026868761843987, 0.042018062902688, 0, 0, 0])
def convert(point=[[0.0, 0.0]], z=260):
point = np.array(point, dtype=np.float64).reshape(-1, 1, 2)
pts_uv = cv2.undistortPoints(point, camera_matrix, dist_coeffs) * z
return pts_uv[0][0]
print(convert([993, 10], 260))
print(convert([554, 559], 260))
# if __name__ == '__main__':
# pixel2robot(993, 10)
# pixel2robot(554, 559)

34
src/Sensor/scripts/dc_motor.py Executable file
View File

@ -0,0 +1,34 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
控制多个pwm舵机转动
"""
def main():
while True:
# JetMax has 3 DC motor ports; the maximum output voltage equals the supply voltage
# Speed range is -100 to +100; 0 stops the motor
# The sign selects the direction; values must be integers
hiwonder.motor1.set_speed(100)  # motor 1 at speed 100
hiwonder.motor2.set_speed(-100)  # motor 2 at speed -100
time.sleep(2)
# You can also fetch the corresponding motor object with getattr()
def set_motor(id_, speed):
getattr(hiwonder, "motor%d" % id_).set_speed(speed)
set_motor(1, 0)  # stop motor 1
set_motor(2, 0)  # stop motor 2
set_motor(3, 0)  # stop motor 3
time.sleep(5)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,59 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
将led点阵模块连接到jetmax 的GPIO2口
"""
# 字模数据
font = {'0':0x3f,'1':0x06,'2':0x5b,'3':0x4f,'4':0x66,'5':0x6d,'6':0x7d,'7':0x07,'8':0x7f,'9':0x6f, '-': 0x40}
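# Standard 7-segment encoding: bits 0-6 light segments a-g, bit 7 is the decimal point
# (e.g. 0x3f lights segments a-f to draw '0').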
def main():
mtx = hiwonder.TM1640(4, 9)  # 4 and 9 are the clk and dio pin numbers
mtx.gram = [0xFF] * 4  # gram is the display buffer: 4 bytes, one byte per digit
time.sleep(1)
# Display an integer
def display_int(num):
s = str(num)  # convert the number to a string
buf = [font[c] for c in s[:4]]  # only the first 4 digits are shown
mtx.gram = buf
if len(buf) < 4:  # if fewer than 4 digits, pad the front with zeros so the number is right-aligned
buf_zero = [0] * (4 - len(buf))
buf_zero.extend(buf)
mtx.gram = buf_zero
mtx.refresh()
for i in range(-1000,1222, 13):
display_int(i)
# Display a float
def display_float(num):
s = "{:0.1f}".format(num)  # format the number as a string with 1 decimal place
buf = []
for c in s:
if c in font:
buf.append(font[c])
else:
if c == '.':  # for a decimal point, set the DP bit on the previous digit
buf[-1] = buf[-1] | 0x80
mtx.gram = buf
if len(buf) < 4:  # if fewer than 4 digits, pad the front with zeros so the number is right-aligned
buf_zero = [0] * (4 - len(buf))
buf_zero.extend(buf)
mtx.gram = buf_zero
mtx.refresh()
i = -10.0
while i < 10.0:
display_float(i)
i += 0.1
while True:
display_int(int(hiwonder.sonar.get_distance()))
time.sleep(0.2)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,193 @@
#!/usr/bin/env python3
import sys
import math
import rospy
import time
import queue
import threading
import cv2
import numpy as np
import hiwonder
from sensor_msgs.msg import Image as RosImage
from PIL import Image
import torch
from facenet_pytorch import MTCNN
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
from FER import fer
mtcnn = MTCNN(keep_all=True, min_face_size=50, factor=0.709, post_process=False, device=device)
ROS_NODE_NAME = 'face_expression'
jetmax = hiwonder.JetMax()
image_queue = queue.Queue(maxsize=1)
mtx = hiwonder.TM1640(4, 9)
mtx.gram = [0xFF] * 16
mtx.refresh()
def display_expression(name):
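"""Render one of the predefined 16x8 expression patterns on the dot matrix; unknown names clear the display."""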
expressions = {
'Happy' : """---XX------XX---
--X--X----X--X--
-X----X--X----X-
----------------
----------------
---X--------X---
----X------X----
-----XXXXXX-----""".replace(' ', '').split('\n'),
'Angry' : """-XX----------XX-
--XX--------XX--
---XX------XX---
----XX----XX----
----------------
----XXXXXXXX----
---XX------XX---
--XX--------XX--""".replace(' ', '').split('\n'),
'Sad' : """---XXX----XXX---
--XX--------XX--
XXX----------XXX
----------------
----------------
----XXXXXXXX----
--XX--------XX--
-X------------X-""".replace(' ', '').split('\n')
}
if name in expressions:
e = expressions[name]
for y in range(8):
for x in range(16):
mtx.set_bit(x, y, 1 if e[y][x] == 'X' else 0)
else:
mtx.gram = [0] * 16
mtx.refresh()
class FaceExpression:
def __init__(self):
self.lock = threading.RLock()
self.servo_x = 500
self.face_tracking = None
self.no_face_count = 0
self.lost_target_count = 0
self.is_running_face = False
self.fps = 0.0
self.tic = time.time()
self.exp = ''
self.count = 0
def show_faces(img, boxes, landmarks, color):
new_boxes = []
new_landmarks = []
for bb, ll in zip(boxes, landmarks):
x1 = int(hiwonder.misc.val_map(bb[0], 0, 160, 0, 640))
y1 = int(hiwonder.misc.val_map(bb[1], 0, 160, 0, 480))
x2 = int(hiwonder.misc.val_map(bb[2], 0, 160, 0, 640))
y2 = int(hiwonder.misc.val_map(bb[3], 0, 160, 0, 480))
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
new_boxes.append([x1, y1, x2, y2])
landmarks_curr_face = []
if len(landmarks):
for j in range(5):
lx = int(hiwonder.misc.val_map(ll[j][0], 0, 160, 0, 640))
ly = int(hiwonder.misc.val_map(ll[j][1], 0, 160, 0, 480))
cv2.circle(img, (lx, ly), 2, color, 2)
landmarks_curr_face.append([lx, ly])
new_landmarks.append(landmarks_curr_face)
return img, new_boxes, new_landmarks
def image_proc_a(image):
img_ret = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
org_img1 = np.copy(image)
image = cv2.resize(image, (160, 160))
boxes, _, landmarks = mtcnn.detect(Image.fromarray(image), landmarks=True)
if boxes is None:
return img_ret
boxes = list(boxes)
landmarks = list(landmarks)
img_ret, boxes, landmarks = show_faces(img_ret, boxes, landmarks, (0, 255, 0))
box = None
if state.face_tracking is None:
if len(boxes) > 0:
box = min(boxes, key=lambda b: math.sqrt(((b[2] + b[0]) / 2 - 320) ** 2 + ((b[3] + b[1]) / 2 - 240) ** 2))
x1, y1, x2, y2 = box
center_x, center_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
cv2.rectangle(img_ret, (x1, y1), (x2, y2), (255, 0, 0), 3)
cv2.circle(img_ret, (center_x, center_y), 2, (0, 255, 0), 4)
state.face_tracking = center_x, center_y
state.no_face_count = time.time() + 2
else:
centers = [(int((box[2] + box[0]) / 2), int((box[3] + box[1]) / 2), box) for box in boxes]
get_face = False
if len(centers) > 0:
center_x, center_y = state.face_tracking
cv2.circle(img_ret, (center_x, center_y), 2, (255, 0, 0), 4)
min_dist_center = min(centers, key=lambda c: math.sqrt((c[0] - center_x) ** 2 + (c[1] - center_y) ** 2))
new_center_x, new_center_y, box = min_dist_center
x1, y1, x2, y2 = box
dist = math.sqrt((new_center_x - center_x) ** 2 + (new_center_y - center_y) ** 2)
if dist < 250:
cv2.rectangle(img_ret, (x1, y1), (x2, y2), (0, 0, 255), 3)
cv2.circle(img_ret, (new_center_x, new_center_y), 2, (0, 0, 255), 4)
get_face = True
state.face_tracking = int(new_center_x), int(new_center_y)
state.no_face_count = time.time() + 0.2
if state.no_face_count < time.time():
state.face_tracking = None
get_face = False
if get_face:
x1, y1, x2, y2 = box
x1 = 0 if x1 < 0 else 639 if x1 > 639 else x1
x2 = 0 if x2 < 0 else 639 if x2 > 639 else x2
y1 = 0 if y1 < 0 else 479 if y1 > 479 else y1
y2 = 0 if y2 < 0 else 479 if y2 > 479 else y2
print(x1, y1, x2, y2)
face = org_img1[y1: y2, x1: x2]
cv2.imshow("face", cv2.cvtColor(face, cv2.COLOR_RGB2BGR))
a = fer.fer(face)
cv2.putText(img_ret, a, (0, 200), 0, 1.6, (255, 50, 50), 5)
display_expression(a)
return img_ret
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
image = image_proc_a(image)
toc = time.time()
curr_fps = 1.0 / (toc - state.tic)
state.fps = curr_fps if state.fps == 0.0 else (state.fps * 0.95 + curr_fps * 0.05)
state.tic = toc
cv2.imshow(ROS_NODE_NAME, image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
state = FaceExpression()
jetmax.go_home(1)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', RosImage, image_callback)
try:
while not rospy.is_shutdown():
image_proc_b()
except Exception as e:
rospy.logerr(e)
print(e.__traceback__.tb_lineno)
sys.exit()

View File

@ -0,0 +1,178 @@
#!/usr/bin/env python3
import math
import rospy
import time
import queue
import threading
from sensor_msgs.msg import Image as RosImage
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from std_srvs.srv import Trigger, TriggerResponse, TriggerRequest
from std_srvs.srv import Empty
from object_tracking.srv import SetTarget, SetTargetResponse, SetTargetRequest
import cv2
import numpy as np
import hiwonder
import sys
import Jetson.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.OUT)
GPIO.setup(4, GPIO.OUT)
GPIO.output(9, 0)
GPIO.output(4, 0)
from PIL import Image
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
mtcnn = MTCNN(keep_all=True, min_face_size=50, factor=0.709, post_process=False, device=device)
ROS_NODE_NAME = 'face_tracking'
TARGET_PIXEL_X, TARGET_PIXEL_Y = 320, 240
class FaceTracking:
def __init__(self):
self.servo_x = 500
self.servo_y = 500
self.face_x_pid = hiwonder.PID(0.09, 0.005, 0.005)
self.face_z_pid = hiwonder.PID(0.1, 0.0, 0.0)
self.tracking_face = None
self.no_face_count = 0
self.lost_target_count = 0
self.fan_on = False
def show_faces(img, boxes, landmarks, color):
new_boxes = []
new_landmarks = []
for bb, ll in zip(boxes, landmarks):
x1 = int(hiwonder.misc.val_map(bb[0], 0, 160, 0, 640))
y1 = int(hiwonder.misc.val_map(bb[1], 0, 160, 0, 480))
x2 = int(hiwonder.misc.val_map(bb[2], 0, 160, 0, 640))
y2 = int(hiwonder.misc.val_map(bb[3], 0, 160, 0, 480))
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
new_boxes.append([x1, y1, x2, y2])
landmarks_curr_face = []
if len(landmarks):
for j in range(5):
lx = int(hiwonder.misc.val_map(ll[j][0], 0, 160, 0, 640))
ly = int(hiwonder.misc.val_map(ll[j][1], 0, 160, 0, 480))
cv2.circle(img, (lx, ly), 2, color, 4)
landmarks_curr_face.append([lx, ly])
new_landmarks.append(landmarks_curr_face)
return img, new_boxes, new_landmarks
def fan_control():
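"""Background loop: while state.fan_on is set, bit-bang roughly a 50% duty cycle on GPIO 4 so the fan runs at reduced speed; otherwise hold both pins low."""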
while True:
if state.fan_on:
GPIO.output(9, 0)
GPIO.output(4, 1)
time.sleep(0.01)
GPIO.output(9, 0)
GPIO.output(4, 0)
time.sleep(0.01)
else:
GPIO.output(9, 0)
GPIO.output(4, 0)
time.sleep(0.01)
def face_tracking(image):
ta = time.time()
org_img = np.copy(image)
image = cv2.resize(image, (160, 160))
boxes, _, landmarks = mtcnn.detect(Image.fromarray(image), landmarks=True)
if boxes is not None:
boxes = boxes.tolist()
landmarks = landmarks.tolist()
org_img, boxes, landmarks = show_faces(org_img, boxes, landmarks, (0, 255, 0))
else:
boxes = []
if state.tracking_face is None:
if len(boxes) > 0:
box = min(boxes, key=lambda b: math.sqrt(((b[2] + b[0]) / 2 - 320) ** 2 + ((b[3] + b[1]) / 2 - 240) ** 2))
x1, y1, x2, y2 = box
org_img = cv2.rectangle(org_img, (x1, y1), (x2, y2), (255, 0, 0), 3)
center_x, center_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
org_img = cv2.circle(org_img, (center_x, center_y), 2, (0, 255, 0), 4)
state.tracking_face = center_x, center_y
state.no_face_count = time.time() + 2
state.face_x_pid.clear()
state.face_z_pid.clear()
else:
centers = [(int((box[2] + box[0]) / 2), int((box[3] + box[1]) / 2), box) for box in boxes]
get_face = False
if len(centers) > 0:
center_x, center_y = state.tracking_face
org_img = cv2.circle(org_img, (center_x, center_y), 2, (0, 0, 255), 4)
min_dist_center = min(centers, key=lambda c: math.sqrt((c[0] - center_x) ** 2 + (c[1] - center_y) ** 2))
new_center_x, new_center_y, box = min_dist_center
x1, y1, x2, y2 = box
dist = math.sqrt((new_center_x - center_x) ** 2 + (new_center_y - center_y) ** 2)
if dist < 150:
org_img = cv2.rectangle(org_img, (x1, y1), (x2, y2), (255, 0, 0), 3)
org_img = cv2.circle(org_img, (new_center_x, new_center_y), 2, (255, 0, 0), 4)
get_face = True
state.tracking_face = int(new_center_x), int(new_center_y)
state.no_face_count = time.time() + 2
if state.no_face_count < time.time():
state.tracking_face = None
get_face = False
state.fan_on = False
print("FAN OFF")
if get_face:
state.fan_on = True
center_x, center_y = state.tracking_face
x = center_x - 320
if abs(x) > 30:
state.face_x_pid.SetPoint = 0
state.face_x_pid.update(x)
state.servo_x += state.face_x_pid.output
jetmax.set_servo(1, int(state.servo_x), 0.02)
else:
state.face_x_pid.update(0)
z = center_y - 240
if abs(z) > 30:
state.face_z_pid.SetPoint = 0
state.face_z_pid.update(z)
z = jetmax.position[2] + state.face_z_pid.output
jetmax.set_position((jetmax.position[0], jetmax.position[1], z), 0.02)
else:
state.face_z_pid.update(0)
print(center_x, center_y)
return org_img
def image_proc():
global mtcnn
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
image = face_tracking(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow("result", image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
state = FaceTracking()
jetmax = hiwonder.JetMax()
jetmax.go_home()
rospy.sleep(1)
image_queue = queue.Queue(maxsize=1)
threading.Thread(target=fan_control, daemon=True).start()
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', RosImage, image_callback)  # Subscribe to the camera feed
while not rospy.is_shutdown():
image_proc()

View File

@ -0,0 +1,66 @@
#!/usr/bin/env python3
import time
import Jetson.GPIO as GPIO
"""
控制风扇模块
风扇模块上有独立的马达驱动电路 需要使用两个io口来控制其正反转 或者两个io口通过pwm控制其速度
可以将风扇模块使用4pin 线连接到 jetmax 扩展板上的 GPIO2
其对应的引脚是 gpio9, 和gpio4
"""
# Initialize the GPIO pins
def init_fan():
GPIO.setmode(GPIO.BCM)  # use BCM pin numbering
GPIO.setup(9, GPIO.OUT)  # set GPIO 9 as output
GPIO.output(9, 0)
GPIO.setup(4, GPIO.OUT)  # set GPIO 4 as output
GPIO.output(4, 0)
def main():
init_fan()
# The levels of the two pins select forward/reverse:
# both pins low: stopped
# one high, one low: one of the two directions
# to control speed, hold one pin low and drive the other with PWM
# forward at 100% speed
GPIO.output(4, 1)
GPIO.output(9, 0)
time.sleep(1)
# stop
GPIO.output(9, 0)
GPIO.output(4, 0)
time.sleep(1)
# reverse
GPIO.output(4, 0)
GPIO.output(9, 1)
time.sleep(1)
# stop
GPIO.output(9, 0)
GPIO.output(4, 0)
time.sleep(1)
# To control the speed, drive one of the pins with PWM
# e.g. 50% duty cycle
t = time.time() + 2
while time.time() < t:
GPIO.output(4, 1)
GPIO.output(9, 0)
time.sleep(0.01)
GPIO.output(9, 0)
GPIO.output(4, 0)
time.sleep(0.01)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,224 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import threading
import numpy as np
import rospy
import queue
from sensor_msgs.msg import Image
import hiwonder
import mediapipe as mp
ROS_NODE_NAME = "hand_gesture_tube"
mtx = hiwonder.TM1640(4, 9)
mtx.gram = [0xFF] * 4
mtx.refresh()
class HandGesture:
def __init__(self):
self.moving_color = None
self.target_colors = {}
self.lock = threading.RLock()
self.position = 0, 0, 0
self.runner = None
self.count = 0
self.gesture_str = ''
ps = (-205, 0 + 10, 150), (-205, 0 + 10, 120), (-205, 0 + 10, 190)
def vector_2d_angle(v1, v2):
"""
Solve the angle between two vectors
"""
v1_x = v1[0]
v1_y = v1[1]
v2_x = v2[0]
v2_y = v2[1]
try:
angle_ = math.degrees(math.acos(
(v1_x * v2_x + v1_y * v2_y) / (((v1_x ** 2 + v1_y ** 2) ** 0.5) * ((v2_x ** 2 + v2_y ** 2) ** 0.5))))
except (ValueError, ZeroDivisionError): # zero-length vector or acos domain error
angle_ = 65535.
if angle_ > 180.:
angle_ = 65535.
return angle_
def hand_angle(hand_):
"""
Compute the bend angle of each finger from the hand landmarks; the gesture is determined from these angles
"""
angle_list = []
# ---------------------------- thumb
angle_ = vector_2d_angle(
((int(hand_[0][0]) - int(hand_[2][0])), (int(hand_[0][1]) - int(hand_[2][1]))),
((int(hand_[3][0]) - int(hand_[4][0])), (int(hand_[3][1]) - int(hand_[4][1])))
)
angle_list.append(angle_)
# ---------------------------- index
angle_ = vector_2d_angle(
((int(hand_[0][0]) - int(hand_[6][0])), (int(hand_[0][1]) - int(hand_[6][1]))),
((int(hand_[7][0]) - int(hand_[8][0])), (int(hand_[7][1]) - int(hand_[8][1])))
)
angle_list.append(angle_)
# ---------------------------- middle
angle_ = vector_2d_angle(
((int(hand_[0][0]) - int(hand_[10][0])), (int(hand_[0][1]) - int(hand_[10][1]))),
((int(hand_[11][0]) - int(hand_[12][0])), (int(hand_[11][1]) - int(hand_[12][1])))
)
angle_list.append(angle_)
# ---------------------------- ring
angle_ = vector_2d_angle(
((int(hand_[0][0]) - int(hand_[14][0])), (int(hand_[0][1]) - int(hand_[14][1]))),
((int(hand_[15][0]) - int(hand_[16][0])), (int(hand_[15][1]) - int(hand_[16][1])))
)
angle_list.append(angle_)
# ---------------------------- pinky
angle_ = vector_2d_angle(
((int(hand_[0][0]) - int(hand_[18][0])), (int(hand_[0][1]) - int(hand_[18][1]))),
((int(hand_[19][0]) - int(hand_[20][0])), (int(hand_[19][1]) - int(hand_[20][1])))
)
angle_list.append(angle_)
return angle_list
def h_gesture(angle_list):
"""
Map the per-finger bend angles to a gesture string
"""
thr_angle = 65.
thr_angle_thumb = 53.
thr_angle_s = 49.
gesture_str = None
if 65535. not in angle_list:
if (angle_list[0] > thr_angle_thumb) and (angle_list[1] > thr_angle) and (angle_list[2] > thr_angle) and (
angle_list[3] > thr_angle) and (angle_list[4] > thr_angle):
gesture_str = "0"
elif (angle_list[0] > 5) and (angle_list[1] < thr_angle_s) and (angle_list[2] > thr_angle) and (
angle_list[3] > thr_angle) and (angle_list[4] > thr_angle):
gesture_str = "1"
elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and (
angle_list[3] > thr_angle) and (angle_list[4] > thr_angle):
gesture_str = "2"
elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and (
angle_list[3] < thr_angle_s) and (angle_list[4] > thr_angle):
gesture_str = "3"
elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] > thr_angle) and (angle_list[2] < thr_angle_s) and (
angle_list[3] < thr_angle_s) and (angle_list[4] < thr_angle_s):
gesture_str = "3" # alternate "3": middle, ring and pinky raised
elif (angle_list[0] > thr_angle_thumb) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and (
angle_list[3] < thr_angle_s) and (angle_list[4] < thr_angle_s):
gesture_str = "4"
elif (angle_list[0] < thr_angle_s) and (angle_list[1] < thr_angle_s) and (angle_list[2] < thr_angle_s) and (
angle_list[3] < thr_angle_s) and (angle_list[4] < thr_angle_s):
gesture_str = "5"
elif (angle_list[0] < thr_angle_s) and (angle_list[1] > thr_angle) and (angle_list[2] > thr_angle) and (
angle_list[3] > thr_angle) and (angle_list[4] < thr_angle_s):
gesture_str = "6"
else:
gesture_str = None
return gesture_str
ngcs = {
'one': 'gcode/1.ngc',
'two': 'gcode/2.ngc',
'three': 'gcode/3.ngc',
'four': 'gcode/4.ngc',
'five': 'gcode/5.ngc',
'six': 'gcode/6.ngc',
'hand_heart': 'ngcs/hand_heart.ngc',
}
def draw_num(num):
all_num = ['one', 'two', 'three', 'four', 'five', 'six', 'fist', 'hand_heart', 'nico-nico-ni', "OK"]
i = all_num.index(num)
if i < 6:
for i in range(i + 1):
hiwonder.buzzer.on()
rospy.sleep(0.1)
hiwonder.buzzer.off()
rospy.sleep(0.1)
else:
if i == 6:
hiwonder.buzzer.on()
rospy.sleep(1)
hiwonder.buzzer.off()
def image_proc_a(img):
img_ret = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if state.runner is not None:
cv2.putText(img_ret, state.gesture_str, (0, 200), 0, 1.5, (100, 100, 255), 5)
return img_ret
results = hands.process(img)
if results.multi_handedness:
for label in results.multi_handedness:
print(label)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
# rospy.logdebug('hand_landmarks:', hand_landmarks)
mp_drawing.draw_landmarks(img_ret, hand_landmarks, mp_hands.HAND_CONNECTIONS)
hand_local = []
for i in range(21):
x = hand_landmarks.landmark[i].x * img.shape[1]
y = hand_landmarks.landmark[i].y * img.shape[0]
hand_local.append((x, y))
if hand_local:
angle_list = hand_angle(hand_local)
gesture_str = h_gesture(angle_list)
if gesture_str:
cv2.putText(img_ret, gesture_str, (0, 200), 0, 1.5, (100, 100, 255), 5)
n = int(gesture_str)
mtx.tube_display_int(n)
else:
mtx.gram = [0] * 4
mtx.refresh()
return img_ret
def image_proc_b():
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
image = cv2.flip(image, 1)
image = image_proc_a(image)
image = cv2.resize(image, (1024, 768))
cv2.imshow(ROS_NODE_NAME, image)
cv2.waitKey(1)
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
rospy.sleep(0.2)
state = HandGesture()
image_queue = queue.Queue(maxsize=1)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
static_image_mode=False,
max_num_hands=2,
min_detection_confidence=0.75,
min_tracking_confidence=0.5)
jetmax = hiwonder.JetMax()
jetmax.go_home()
rospy.sleep(1)
hiwonder.motor2.set_speed(0)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', Image, image_callback, queue_size=1)
while not rospy.is_shutdown():
image_proc_b()

View File

@ -0,0 +1,67 @@
import cv2
import time
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G')) # request MJPG frames
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
ret, frame = cap.read()
cv2.imwrite(str(time.time()) + '.jpg', frame)
# def SetPoints(windowname, img):
# """
# Open the given image for point marking; returns the marked points as a string.
# """
# print('(Hint: left-click the coordinates to mark, Enter to confirm, Esc to skip, any other key to retry.)')
# points = []
# def onMouse(event, x, y, flags, param):
# if event == cv2.EVENT_LBUTTONDOWN:
# cv2.circle(temp_img, (x, y), 10, (102, 217, 239), -1)
# points.append([x, y])
# cv2.imshow(windowname, temp_img)
# temp_img = img.copy()
# cv2.namedWindow(windowname)
# cv2.imshow(windowname, temp_img)
# cv2.setMouseCallback(windowname, onMouse)
# key = cv2.waitKey(0)
# if key == 13: # Enter
# print('Coordinates:', points)
# del temp_img
# cv2.destroyAllWindows()
# return str(points)
# elif key == 27: # ESC
# print('Skipping this image')
# del temp_img
# cv2.destroyAllWindows()
# return
# else:
# print('Retry!')
# return SetPoints(windowname, img)
# SetPoints('test', frame)
# import json
# import requests
# # Run inference on an image
# url = "https://api.ultralytics.com/v1/predict/MAyxSmPez5SZXdyspDvF"
# headers = {"x-api-key": "803cf64a0603764f0657e33bb0103f8a11ffc59567"}
# data = {"size": 640, "confidence": 0.25, "iou": 0.45}
# with open("test.jpg", "rb") as f:
# response = requests.post(url, headers=headers, data=data, files={"image": f})
# # Check for successful response
# response.raise_for_status()
# # Print inference results
# print(json.dumps(response.json(), indent=2))

View File

@ -0,0 +1,56 @@
import time
import hiwonder
import queue
import rospy
from sensor_msgs.msg import Image as RosImage
import numpy as np
import cv2
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
ROS_NODE_NAME = 'image_data_taker'
def image_callback(ros_image):
try:
image_queue.put_nowait(ros_image)
except queue.Full:
pass
def up_down():
# step z from 50 to 200 in 30 mm increments
for z in range(50, 200, 30):
jetmax.set_position((0, -200, z), 0.3)
rospy.sleep(5)
save_image()
def save_image():
try:
ros_image = image_queue.get(block=True)
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(str(time.time()) + '.jpg', image)
except queue.Empty:
pass
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax = hiwonder.JetMax()
# jetmax.go_home()
image_queue = queue.Queue(maxsize=1)
image_sub = rospy.Subscriber('/usb_cam/image_rect_color', RosImage, image_callback) # subscribe to the camera feed
rospy.sleep(3)
# ros_image = image_queue.get(block=True)
# image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# rospy.loginfo(image)
# cv2.imwrite('test.jpg', image)
print(jetmax.position)
# up_down()
save_image()
# jetmax.set_position((0, -100, 200), 1)

View File

@ -0,0 +1,15 @@
#!/bin/bash
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep alphabetically|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop alphabetically.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep waste_classification|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop waste_classification.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep object_tracking|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop object_tracking.service
sudo kill -9 $(ps -elf|grep -v grep|grep python3|grep color_sorting|awk '{print $4}') >/dev/null 2>&1
sudo systemctl stop color_sorting.service

View File

@ -0,0 +1 @@
/usr/local/lib/python3.6/dist-packages/mediapipe

1
src/Sensor/scripts/models Symbolic link
View File

@ -0,0 +1 @@
/home/hiwonder/models

View File

@ -0,0 +1,30 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
控制多个pwm舵机转动
"""
def main():
while True:
#控制多个pwm舵机 只需要将舵机对象的名在最后的数改为对应舵机号即可
hiwonder.pwm_servo1.set_position(90, 1) #控制1号舵机用1秒转动到90度位置
hiwonder.pwm_servo2.set_position(90, 1) #控制2号舵机用1秒转动到90度位置
time.sleep(2)
#你还可以通过getattr()来获取对应的对象进行操作
def set_pwm_servo(id_, pos, duration):
getattr(hiwonder, "pwm_servo%d"%id_).set_position(pos, duration)
set_pwm_servo(1, 180, 1) #控制1号舵机用1秒转动到180度位置
set_pwm_servo(2, 0, 2) #控制2号舵机用2秒转动到0度位置
time.sleep(3)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,22 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
控制pwm舵机转动
"""
def main():
while True:
#控制1号pwm舵机
hiwonder.pwm_servo1.set_position(90, 1) #用1秒转动到90度位置
time.sleep(2)
hiwonder.pwm_servo1.set_position(180, 1) #用1秒转动到180度位置
time.sleep(2)
hiwonder.pwm_servo1.set_position(0, 2) #用2秒转动到0度位置
time.sleep(3)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,29 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
控制pwm舵机按速度转动
"""
def main():
while True:
#控制1号pwm舵机
hiwonder.pwm_servo1.set_position(90, 1) #用1秒转动到90度位置
time.sleep(2)
#通过角速度计算舵机需要的运动时间
# 控制舵机用20度每秒的速度从90度转动到180度
t = (180 - 90) / 20
hiwonder.pwm_servo1.set_position(180, t)
time.sleep(t + 1)
# 控制舵机用40度每秒的速度从180度转动到0度
t = (180 - 0) / 40
hiwonder.pwm_servo1.set_position(0, t)
time.sleep(t + 1)
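# --- Illustrative sketch (not part of the original file) ---------------------
# The pattern above (duration = angle change / angular speed) generalizes to a
# small helper. move_at_speed() and its parameters are assumptions for
# illustration, not part of the hiwonder API.
def move_at_speed(servo, current_deg, target_deg, deg_per_s):
    """Move a PWM servo to target_deg at deg_per_s; returns the travel time."""
    t = abs(target_deg - current_deg) / deg_per_s  # seconds needed at that speed
    servo.set_position(target_deg, t)
    return t
# e.g. t = move_at_speed(hiwonder.pwm_servo1, 90, 180, 20); time.sleep(t + 1)
# -----------------------------------------------------------------------------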
if __name__ == "__main__":
main()

View File

@ -0,0 +1,21 @@
#!/usr/bin/env python3
import serial
import time
def main():
serialHandle = serial.Serial('/dev/ttyTHS1', 115200) # open the serial port at 115200 baud
# Demonstrates sending and receiving over the serial port.
# Short the JetMax rx and tx pins together (the middle two pins of the UART port
# on the expansion board) and 'abcdefg' will be printed on the screen.
while True:
serialHandle.write(bytes("abcdefg", encoding='utf-8')) # write to the port; the data must be bytes or bytearray
time.sleep(1)
count = serialHandle.inWaiting() # number of bytes waiting in the receive buffer
recv_data = serialHandle.read(count) # read those bytes out
print(recv_data)
time.sleep(1)
if __name__=='__main__':
main()

View File

@ -0,0 +1,23 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
jetmax上的舵机的位置数值范围为01000, 对应0240
需要注意 舵机的位置数值和时间数值都需要用整数
"""
def main():
hiwonder.serial_servo.set_position(1, 1000, 3000) #让id为1的舵机用5000毫秒时间从当前位置运动到1000位置
time.sleep(3)
hiwonder.serial_servo.set_position(1, 0, 8000) #让id为1的舵机用8000毫秒时间从当前位置运动到0位置
time.sleep(8)
#如果我们需要用角度来控制的话可以将及角度转化为数值例如现在要转转动到180度位置
p = 180 / 240 * 1000
hiwonder.serial_servo.set_position(1, int(p), 4000) #这就是用4000毫秒从当前位置运动到 180度位置
hiwonder.serial_servo.set_position(1, int(120 / 240 * 1000), 2000) #这就是用2000毫秒从当前位置运动到 120度位置
if __name__ == "__main__":
main()
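# --- Illustrative sketch (not part of the original file) ---------------------
# The angle <-> position conversion above is worth factoring out. These helper
# names are assumptions for illustration, not part of the hiwonder API.
def deg_to_pos(deg):
    """Convert 0-240 degrees to the servo's 0-1000 position scale."""
    return int(deg / 240 * 1000)

def pos_to_deg(pos):
    """Convert a 0-1000 position value back to degrees."""
    return pos / 1000 * 240
# e.g. hiwonder.serial_servo.set_position(1, deg_to_pos(180), 4000)
# -----------------------------------------------------------------------------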

View File

@ -0,0 +1,33 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
The bus servos support reading back a number of parameters and status values
"""
def main():
while True:
id_ = 1
print("ID: %d" % id_)
pos = hiwonder.serial_servo.read_position(id_)
print('Position: %d' % pos)
now_temp = hiwonder.serial_servo.read_temp(id_)
print('Temperature: %d°' % now_temp)
now_vin = hiwonder.serial_servo.read_vin(id_)
print('Voltage input: %dmV' % now_vin)
d = hiwonder.serial_servo.read_deviation(id_)
print('Deviation: %d' % d)
limit = hiwonder.serial_servo.read_angle_limit(id_)
print('Position range: %d-%d' % (limit[0], limit[1]))
vin = hiwonder.serial_servo.read_vin_limit(id_)
print('Voltage range: %dmV-%dmV' % (vin[0], vin[1]))
temp = hiwonder.serial_servo.read_temp_limit(id_)
print('Temperature limit: 50°-%d°' % temp)
print("\n\n")
time.sleep(1)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,32 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
如果需要用角速度来控制舵机的转动我们需要自己计算舵机从当前位置转动到目标位置的时间
"""
def main():
#让id为1的舵机用500毫秒时间从当前位置运动到200位置, 并记下当前位置
#记下当前位置是为了通过目标角速度计算到达目标位置需要的时间
#当前位置也能通过与舵机通信来读取,读取较慢且麻烦,可以用变量记下来
hiwonder.serial_servo.set_position(1, 200, 500)
time.sleep(2)
#假如现在要用每秒50度的速度转到800位置可以这么计算
angle = 200 / 1000 * 240 # 将当前位置数值转换为角度数值
t_angle = 800 / 1000 * 240 # 将目标位置数值转换为角度数值
th = angle - t_angle #计算当前角度与目标角度的差
t = abs(th / 50) #通过角速度50 计算转到目标位置需要的时间, 我们要计算时间, 时间不能为负值
hiwonder.serial_servo.set_position(1, 800, int(t * 1000)) #因为角速度为度每秒, 这里将秒转换为毫秒
time.sleep(t)
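# --- Illustrative sketch (not part of the original file) ---------------------
# Combining the two conversions above into one helper. The function name and
# the idea of tracking the position in a variable are assumptions for
# illustration, not part of the hiwonder API.
def set_position_at_speed(servo_id, current_pos, target_pos, deg_per_s):
    """Move from current_pos to target_pos (0-1000 scale) at deg_per_s degrees/s."""
    delta_deg = abs(target_pos - current_pos) / 1000 * 240  # angle to travel
    t = delta_deg / deg_per_s                               # seconds needed
    hiwonder.serial_servo.set_position(servo_id, target_pos, int(t * 1000))
    return t
# e.g. t = set_position_at_speed(1, 200, 800, 50); time.sleep(t)
# -----------------------------------------------------------------------------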
if __name__ == "__main__":
while True:
main()

61
src/Sensor/scripts/sonar.py Executable file
View File

@ -0,0 +1,61 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
超声波灯光控制及距离读取
超声波模块需要连接到jetmax的I2C接口
任意时刻只能连接一个超声波模块
"""
def main():
# The ultrasonic module's lights have two modes: in mode 0 the user sets a fixed color directly; in mode 1 the lights cycle ("breathe") periodically
hiwonder.sonar.set_rgb_mode(0) # select mode 0
# the module's two "eyes" are two lights, numbered 0 and 1
hiwonder.sonar.set_color(0, 0xFF0000) # set light 0 red; 0xFF0000 is the RGB value to emit
hiwonder.sonar.set_color(1, 0x00FF00) # set light 1 green
time.sleep(2)
hiwonder.sonar.set_rgb_mode(1) # select mode 1
hiwonder.sonar.set_breath_cycle(0, 0, 0) # light 0, red channel: breath period 0, i.e. fully off
hiwonder.sonar.set_breath_cycle(0, 1, 0) # light 0, green channel: breath period 0, i.e. fully off
hiwonder.sonar.set_breath_cycle(0, 2, 1000) # light 0, blue channel: breath period 1000 ms
# light 0 now breathes blue with a 1000 ms period
time.sleep(2)
hiwonder.sonar.set_breath_cycle(1, 0, 500) # light 1, red channel: breath period 500 ms
hiwonder.sonar.set_breath_cycle(1, 1, 0) # light 1, green channel: fully off
hiwonder.sonar.set_breath_cycle(1, 2, 1000) # light 1, blue channel: breath period 1000 ms
# light 1 now breathes blue with a 1000 ms period and red with a 500 ms period
time.sleep(2)
# start_symphony() makes the RGB lights cycle through colors on their own
hiwonder.sonar.start_symphony()
distance = None
last_print_time = time.time()
while True:
d = hiwonder.sonar.get_distance() # read the ultrasonic distance, in millimeters
# a little filtering smooths out occasional jumps and bad readings
if distance is None:
distance = d
else:
distance = distance * 0.6 + d * 0.4 # weight 0.4 for the new reading, 0.6 for the running value
# print once per second
if last_print_time < time.time():
last_print_time = time.time() + 1
print(distance)
# sample the sensor every 0.1 s
time.sleep(0.1)
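# --- Illustrative sketch (not part of the original file) ---------------------
# The 0.6/0.4 blend above is an exponential moving average. Factored out as a
# small class (the name and the alpha default are assumptions for illustration):
class EmaFilter:
    def __init__(self, alpha=0.4):
        self.alpha = alpha   # weight given to each new sample
        self.value = None
    def update(self, sample):
        if self.value is None:
            self.value = sample
        else:
            self.value = (1 - self.alpha) * self.value + self.alpha * sample
        return self.value
# e.g. f = EmaFilter(); smoothed = f.update(hiwonder.sonar.get_distance())
# -----------------------------------------------------------------------------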
if __name__ == "__main__":
main()

147
src/Sensor/scripts/sonar_color.py Executable file
View File

@ -0,0 +1,147 @@
#!/usr/bin/env python3
import sys
import cv2
import math
import time
import numpy as np
from sensor_msgs.msg import Image
import hiwonder
import threading
import rospy
"""
17 Ultrasonic + recognition grasping (recognition triggered by distance measurement)
"""
ROS_NODE_NAME = "color_detect"
def moving(block):
global runner
(x, y), r, color_name = block
if color_name == 'red':
hiwonder.sonar.set_color(0, 0xFF0000)
hiwonder.sonar.set_color(1, 0xFF0000)
if color_name == 'green':
hiwonder.sonar.set_color(0, 0x00FF00)
hiwonder.sonar.set_color(1, 0x00FF00)
if color_name == 'blue':
hiwonder.sonar.set_color(0, 0x0000FF)
hiwonder.sonar.set_color(1, 0x0000FF)
hiwonder.buzzer.on()
rospy.sleep(0.1)
hiwonder.buzzer.off()
rospy.sleep(1)
hiwonder.pwm_servo1.set_position(100, 0.5)
rospy.sleep(2)
if color_name == 'red':
jetmax.set_position((238, 80, 130), 2)
rospy.sleep(2)
elif color_name == 'green':
jetmax.set_position((238, 30, 130), 1.8)
rospy.sleep(1.8)
elif color_name == 'blue':
jetmax.set_position((238, -20, 130), 1.6)
rospy.sleep(1.6)
else:
pass
cur = list(jetmax.position)
cur[2] = 85
jetmax.set_position(cur, 1)
hiwonder.pwm_servo1.set_position(130, 0.5)
rospy.sleep(1)
cur[2] = 150
jetmax.set_position(cur, 1.2)
rospy.sleep(2)
jetmax.go_home(2)
rospy.sleep(2)
hiwonder.sonar.set_color(0, 0)
hiwonder.sonar.set_color(1, 0)
runner = None
def image_proc(img):
global runner
if distance is None: # no sonar reading yet; skip processing this frame
return img
img_h, img_w = img.shape[:2]
frame_gb = cv2.GaussianBlur(np.copy(img), (5, 5), 5)
frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_RGB2LAB) # convert RGB to LAB
blocks = []
frame_roi = frame_lab[0:280, 140:500]
cv2.line(img, (140, 0), (140, 280), (255, 0, 0), 2)
cv2.line(img, (140, 280), (500, 280), (255, 0, 0), 2)
cv2.line(img, (500, 280), (500, 0), (255, 0, 0), 2)
cv2.line(img, (500, 0), (140, 0), (255, 0, 0), 2)
cv2.putText(img, "D:{:0.2f}mm".format(distance), (0, 440), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
if runner is not None:
return img
if distance < 70:
for color_name, color in target_colors.items(): # iterate over all color thresholds
frame_mask = cv2.inRange(frame_roi, tuple(color['min']), tuple(color['max']))
eroded = cv2.erode(frame_mask, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
dilated = cv2.dilate(eroded, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
contours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
contour_area = map(lambda c: (c, math.fabs(cv2.contourArea(c))), contours)
contour_area = list(filter(lambda c: c[1] > 1200, contour_area)) # discard blobs that are too small
if len(contour_area) > 0:
for contour, area in contour_area: # Loop through all the contours found
(center_x, center_y), r = cv2.minEnclosingCircle(contour)
cv2.circle(img, (int(center_x + 140), int(center_y)), 1, hiwonder.COLORS[color_name.upper()], 5)
cv2.circle(img, (int(center_x + 140), int(center_y)), int(r), hiwonder.COLORS[color_name.upper()], 2)
cv2.putText(img, color_name.upper(), (int(center_x + 140), int(center_y)), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255), 2)
blocks.append(((center_x, center_y), r, color_name))
if len(blocks) > 0:
block = max(blocks, key=lambda x: x[1])
runner = threading.Thread(target=moving, args=(block,), daemon=True)
runner.start()
cv2.line(img, (int(img_w / 2 - 10), int(img_h / 2)), (int(img_w / 2 + 10), int(img_h / 2)), (0, 255, 255), 2)
cv2.line(img, (int(img_w / 2), int(img_h / 2 - 10)), (int(img_w / 2), int(img_h / 2 + 10)), (0, 255, 255), 2)
return img
def image_callback(ros_image):
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8, buffer=ros_image.data)
frame_result = np.copy(image)
frame_result = image_proc(frame_result)
image_bgr = cv2.cvtColor(frame_result, cv2.COLOR_RGB2BGR)
cv2.imshow("result", image_bgr)
cv2.waitKey(1)
def distance_update():
global distance
t = time.time()
while True:
d = hiwonder.sonar.get_distance()
distance = d if distance is None else d * 0.4 + distance * 0.6
if t < time.time():
print("Distance:{:0.2f}mm".format(distance))
t = time.time() + 1
rospy.sleep(0.05)
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
jetmax = hiwonder.JetMax()
jetmax.go_home()
hiwonder.pwm_servo1.set_position(130, 0.5)
rospy.sleep(1)
distance = None
runner = None
colors = rospy.get_param('/lab_config_manager/color_range_list', {})
target_colors = {}
target_colors['red'] = colors['red']
target_colors['green'] = colors['green']
target_colors['blue'] = colors['blue']
image_sub = rospy.Subscriber("/usb_cam/image_rect_color", Image, image_callback, queue_size=1)
sonar_thread = threading.Thread(target=distance_update, daemon=True)
sonar_thread.start()
hiwonder.sonar.set_color(0, 0x0)
hiwonder.sonar.set_color(1, 0x0)
try:
rospy.spin()
except KeyboardInterrupt:
sys.exit(0)

146
src/Sensor/scripts/sonar_radar.py Executable file
View File

@ -0,0 +1,146 @@
#!/usr/bin/env python3
# encoding:utf-8
import sys
import time
import numpy as np
from math import *
import threading
import multiprocessing
import matplotlib.pyplot as plt
import hiwonder
jetmax = hiwonder.JetMax()
turning_radius = 25
ax = [] # empty list collecting the dynamic x-axis data
ay = [] # empty list collecting the dynamic y-axis data
# maximum accepted reading; beyond 600 mm the ultrasonic sensor is easily disturbed
maximum_range=50
time.sleep(3)
hiwonder.sonar.set_rgb_mode(0)
hiwonder.sonar.start_symphony() # set the ultrasonic RGB lights to color-cycling mode
# turning radius R
# compute the arm's inverse-kinematics input for turning radius R and sweep angle a
def angle(a,R=turning_radius):
x=0.0
y=0.0
if a>=0 and a <=180:
if a==0:
x=R
y=0.0
return x,y
elif a == 90:
x=0
y=R
return x,y
elif a==180:
x=-R
y=0.0
return x,y
else:
y= sin(pi/180*a)*R
x= y/tan(pi/180*a)
return x,y
else:
pass
plt.ion() # open an interactive plotting window
plt.xlim(-maximum_range, maximum_range)
plt.ylim(0, maximum_range)
plt.grid(True)
end = 0
start = 0
noise = 0
count = 0
error = 20
distance = 0
def two_dimensional_scatter_diagram():
z=[0,0]
flag =1
# sweep angle step:
# a larger step sweeps faster but yields correspondingly fewer data points
step=2
# arm movement speed; live plotting takes time, so making this value very small
# does not actually speed things up, and it causes stutter
speed=50
sp=speed/1000
Detection_Angle=180
data = np.zeros(Detection_Angle+1, dtype=int)
x_y_data=np.zeros((Detection_Angle+1,2), dtype=float)
z[0],z[1]=angle(0)
time.sleep(2.2)
begin_time=0
end_time=0
jetmax.go_home()
time.sleep(1)
cur = list(jetmax.position)
cur[1] += 120
jetmax.set_position(cur, 1)
time.sleep(1)
jetmax.set_joint(1, 30, 1)
time.sleep(1)
while True:
if flag: # flag alternates the arm between left and right sweeps
for j in range(0,Detection_Angle+1,step): # Detection_Angle sets the sweep range
z[0], z[1] = angle(j)
end_time = time.time()
run_time = end_time-begin_time
if sp>run_time:
time.sleep(abs(sp-run_time)*0.9)
jetmax.set_joint(1, 30 + j, sp)
begin_time = time.time()
data[j]=hiwonder.sonar.get_distance() / 10
if data[j] > maximum_range: # clamp readings beyond the maximum range
data[j]=maximum_range
x=z[0]*data[j]/turning_radius
y=z[1]*data[j]/turning_radius
print(data[j])
ax.append(x) # append the x coordinate to the plot data
ay.append(y) # append the y coordinate to the plot data
plt.xlim(-maximum_range, maximum_range)
plt.ylim(0, maximum_range)
plt.grid(True)
plt.plot(ax[-2:], ay[-2:])
plt.pause(0.0000001) # brief pause so the plot can update
flag = 0 # switch sweep direction
plt.clf()
ax.clear()
ay.clear()
else:
for j in range(Detection_Angle,-1,-step):
z[0], z[1] = angle(j)
end_time = time.time()
run_time = end_time-begin_time
if sp>run_time:
time.sleep(abs(sp-run_time)*0.9)
jetmax.set_joint(1, 30 + j, sp)
begin_time = time.time()
data[j]=hiwonder.sonar.get_distance() / 10
if data[j] > maximum_range:
data[j]=maximum_range
x=z[0]*data[j]/turning_radius
y=z[1]*data[j]/turning_radius
ax.append(x) # append the x coordinate to the plot data
ay.append(y) # append the y coordinate to the plot data
plt.xlim(-maximum_range, maximum_range)
plt.ylim(0, maximum_range)
plt.grid(True)
plt.plot(ax[-2:], ay[-2:])
plt.pause(0.000001) # brief pause so the plot can update
flag = 1
plt.clf()
ax.clear()
ay.clear()
if __name__ == "__main__":
two_dimensional_scatter_diagram()

View File

@ -0,0 +1,43 @@
import sys
import math
import hiwonder
import time
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
target_positions = [(238, 20, 80),(238, 20, 123),(238, 20, 166)]
if __name__ == '__main__':
jetmax.go_home()
sucker.set_state(True)
time.sleep(0.5)
sucker.release(3)
time.sleep(2)
overlay = 0
while(overlay<3): # Pick up the block
hiwonder.pwm_servo1.set_position(90 , 0.1)
jetmax.set_position((0, 80, 120), 1)
time.sleep(1)
sucker.set_state(True) # Turn on the air pump
jetmax.set_position((0, 80, 85 - 5), 1)
time.sleep(1)
jetmax.set_position((0, 80, 180), 1)
time.sleep(1)
hiwonder.pwm_servo1.set_position(90, 0.1)
# Go to the target position
(x, y, z) = target_positions[overlay]
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.set_position((x, y, z), 1)
time.sleep(1)
# Put down the block
sucker.release(3) # Turn off the air pump
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.go_home()
time.sleep(1)
overlay = overlay+1

View File

@ -0,0 +1,42 @@
import sys
import math
import hiwonder
import time
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
target_positions = (238, 20, 80)
if __name__ == '__main__':
jetmax.go_home()
sucker.set_state(True)
time.sleep(0.5)
sucker.release(3)
time.sleep(2)
# Pick up the block
hiwonder.pwm_servo1.set_position(90 , 0.1)
jetmax.set_position((0, 80, 120), 1)
time.sleep(1)
sucker.set_state(True) # Turn on the air pump
jetmax.set_position((0, 80, 85 - 5), 1)
time.sleep(1)
jetmax.set_position((0, 80, 180), 1)
time.sleep(1)
hiwonder.pwm_servo1.set_position(90, 0.1)
# Go to the target position
(x, y, z) = target_positions
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.set_position((x, y, z), 1)
time.sleep(1)
# Put down the block
sucker.release(3) # Turn off the air pump
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.go_home()
time.sleep(1)

50
src/Sensor/scripts/test.py Executable file
View File

@ -0,0 +1,50 @@
import sys
import math
import hiwonder
import time
jetmax = hiwonder.JetMax()
sucker = hiwonder.Sucker()
target_positions = (238, 20, 80)
if __name__ == '__main__':
jetmax.go_home()
cur_x, cur_y, cur_z = jetmax.position
print(cur_x, cur_y, cur_z)
sucker.set_state(True)
time.sleep(0.5)
sucker.release(3)
time.sleep(2)
hiwonder.pwm_servo2.set_position(0, 0.1)
time.sleep(1)
jetmax.set_position((100, -230, 50), 1)
time.sleep(1)
cur_x, cur_y, cur_z = jetmax.position
print(cur_x, cur_y, cur_z)
# Pick up the block
'''
hiwonder.pwm_servo1.set_position(90 , 0.1)
jetmax.set_position((0, 80, 120), 1)
time.sleep(1)
sucker.set_state(True) # Turn on the air pump
jetmax.set_position((0, 80, 85 - 5), 1)
time.sleep(1)
jetmax.set_position((0, 80, 180), 1)
time.sleep(1)
hiwonder.pwm_servo1.set_position(90, 0.1)
# Go to the target position
(x, y, z) = target_positions[overlay]
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.set_position((x, y, z), 1)
time.sleep(1)
# Put down the block
sucker.release(3) # Turn off the air pump
jetmax.set_position((x, y, 180), 1)
time.sleep(1)
jetmax.go_home()
time.sleep(1)
'''

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python3
import hiwonder
import time
"""
将led点阵模块连接到jetmax 的GPIO2口
"""
def main():
mtx = hiwonder.TM1640(4, 9) #4, 9 对应 clk dio 引脚号
mtx.gram = [0xFF] * 16 #gram用作点阵的显存 共16个字节 一个字节对应一列led点, 这里就是将所有点都点亮
print(mtx.gram)
mtx.refresh() #设置了显存之后调用 refresh 将显存内容显示出来
time.sleep(2)
#可以使用set_bit来设置显存内某个点的亮灭
# set_bit(x, y, 新状态) 0代表灭 1代表亮
for y in range(8):
for x in range(0, 16, 4):
mtx.set_bit(x, y, 0)
mtx.set_bit(x+1, y, 0)
mtx.set_bit(x+2, y, 0)
mtx.set_bit(x+3, y, 0)
mtx.refresh()
# You can compose a picture from characters and then draw it:
smile = """__XXXX____XXXX__
_X____X__X____X_
________________
________________
________________
__X__________X__
___X________X___
____XXXXXXXX____""".replace(' ', '').split('\n')
print(smile)
for y in range(8):
for x in range(16):
mtx.set_bit(x, y, 1 if smile[y][x] == 'X' else 0)
mtx.refresh()
time.sleep(3)
# clear the screen
mtx.gram = [0] * 16
mtx.refresh()
if __name__ == '__main__':
main()
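# --- Illustrative sketch (not part of the original file) ---------------------
# Each byte in gram drives one 8-LED column, with bit 0 assumed to be the top
# row (consistent with set_bit above). Packing a column byte directly:
def column_byte(rows_on):
    """Pack a list of row indices (0-7) into one framebuffer byte."""
    b = 0
    for row in rows_on:
        b |= 1 << row
    return b
# e.g. a checkerboard: mtx.gram = [0b10101010, 0b01010101] * 8; mtx.refresh()
# -----------------------------------------------------------------------------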

View File

@ -0,0 +1,91 @@
#!/usr/bin/env python3
import os
import sys
import cv2
import rospy
import numpy as np
import threading
import hiwonder
import Jetson.GPIO as GPIO
import time
"""
触摸传感器控制臂
短按执行一个动作
连续短按连次就是复位
长按机械臂向下运动需要一定值后自动复位
"""
ROS_NODE_NAME = "touch_control"
if __name__ == '__main__':
rospy.init_node(ROS_NODE_NAME, log_level=rospy.DEBUG)
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.IN)
jetmax = hiwonder.JetMax()
jetmax.go_home(2)
rospy.sleep(2)
last_state = 1
pressed_time = time.time()
longpress_time = time.time()
short_count = 0
next_short_time = time.time()
def x_short(n):
print('short press x%d' % n)
if n == 1:
jetmax.set_position((jetmax.ORIGIN[0], jetmax.ORIGIN[1], 70), 1)
rospy.sleep(1.1)
jetmax.set_servo(1, int(210 / 240 * 1000), 1)
rospy.sleep(1.1)
pos = list(jetmax.position)
pos[2] += 100
jetmax.set_position(pos, 1)
rospy.sleep(1.1)
jetmax.go_home(1)
rospy.sleep(1.1)
elif n == 2:
jetmax.go_home()
rospy.sleep(1.1)
else:
pass
while not rospy.is_shutdown():
if next_short_time < time.time() and short_count > 0: # once the window since the last short press expires, act on the count
x_short(short_count)
short_count = 0
current_state = GPIO.input(9) # read the current state
if current_state != last_state: # state changed: the touch button was operated
if current_state == 0: # new state 0 means pressed
print("button pressed")
pressed_time = time.time() # record the moment it was pressed
longpress_time = pressed_time
else: # new state 1 means released
print("button released", end='')
if pressed_time == longpress_time:
print(", short press")
short_count += 1
next_short_time = time.time() + 0.5 # a second press within 0.5 s counts as a double press
else:
print(", long press")
else: # state unchanged: the button is idle, or still held down
if current_state == 0: # the button is being held
dur = time.time() - pressed_time # how long it has been held
if dur > 2:
print("button long-pressed")
longpress_time = time.time()
pos = list(jetmax.position)
pos[2] -= 2
if pos[2] <= 60:
jetmax.go_home()
rospy.sleep(1.1)
else:
jetmax.set_position(pos, 0.05)
last_state = current_state # remember the new state
time.sleep(0.05) # poll the button every 50 ms

View File

@ -0,0 +1,48 @@
#!/usr/bin/env python3
import Jetson.GPIO as GPIO
import time
"""
触摸传感器读取实验
触摸传感器是简单的数字io传感器 只要读取对应IO口的电平变化即可
这里我们把传感器连接到jetmax的GPIO2接口上 读取gpio9 引脚的电平即可获取到触摸传感器的状态
根据状态可以判断长按断按等
需要注意当触摸传感器没被按下时输出高电平 被按下输出低电平
"""
def main():
#初始化io口
GPIO.setmode(GPIO.BCM)
GPIO.setup(9, GPIO.IN)
last_state = 1
press_time = time.time()
longpress_time = time.time()
while True:
current_state = GPIO.input(9) # read the current state
if current_state != last_state: # state changed: the touch button was operated
if current_state == 0: # new state 0 means pressed
print("button pressed")
pressed_time = time.time() # record the moment it was pressed
longpress_time = pressed_time
else: # new state 1 means released
print("button released", end='')
if pressed_time == longpress_time:
print(", short press")
else:
print(", long press")
else: # state unchanged: the button is idle, or still held down
if current_state == 0: # the button is being held
dur = time.time() - pressed_time # how long it has been held
if dur > 2 and longpress_time < time.time(): # held over 2 s counts as a long press; longpress_time throttles the print to once per second
print("button long-pressed for %d s" % dur)
longpress_time = time.time() + 1
last_state = current_state # remember the new state
time.sleep(0.05) # poll the button every 50 ms
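# --- Illustrative sketch (not part of the original file) ---------------------
# The polling loop above can bounce on noisy contacts. A hedged debounce
# wrapper (the function and its parameters are assumptions for illustration):
def read_debounced(pin=9, samples=3, interval=0.01):
    """Return the pin level only if `samples` consecutive reads agree, else None."""
    first = GPIO.input(pin)
    for _ in range(samples - 1):
        time.sleep(interval)
        if GPIO.input(pin) != first:
            return None   # reading unstable, caller should retry
    return first
# -----------------------------------------------------------------------------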
if __name__ == '__main__':
main()

35
src/Sensor/scripts/tts.py Executable file
View File

@ -0,0 +1,35 @@
import smbus2 # I2C library (same smbus API as on the Raspberry Pi)
import time
import os
class TTS:
address = None
TTS_MODULE_I2C_ADDR = 0x40 # the module's I2C address
def __init__(self, bus=1):
self.address = self.TTS_MODULE_I2C_ADDR
self.bus = smbus2.SMBus(bus)
def TTSModuleSpeak(self, sign, words):
head = [0xFD,0x00,0x00,0x01,0x00] # text-playback command header
wordslist = words.encode("gb2312") # encode the text as GB2312
signdata = sign.encode("gb2312")
length = len(signdata) + len(wordslist) + 2
head[1] = length >> 8
head[2] = length
head.extend(list(signdata))
head.extend(list(wordslist))
write = smbus2.i2c_msg.write(self.address, head)
self.bus.i2c_rdwr(write)
time.sleep(0.05)
if __name__ == '__main__':
v = TTS()
# [h?] sets how English words are pronounced: 0 auto-detect, 1 letter by letter, 2 as whole words
# [v?] sets the volume, range 0-10; 10 is loudest
v.TTSModuleSpeak("[h0][v10][m3]","你好,我是语音合成模块") # "Hello, I am the speech synthesis module"
time.sleep(3) # required delay: wait for playback to finish
v.TTSModuleSpeak("[v8][m3]", "你好,我是语音合成模块")
time.sleep(3)
v.TTSModuleSpeak("[v3][m3]", "你好,我是语音合成模块")

206
src/chassis/CMakeLists.txt Normal file
View File

@ -0,0 +1,206 @@
cmake_minimum_required(VERSION 3.0.2)
project(chassis)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
roscpp
rospy
std_msgs
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES chassis
# CATKIN_DEPENDS roscpp rospy std_msgs
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/chassis.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/chassis_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_chassis.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

68
src/chassis/package.xml Normal file
View File

@ -0,0 +1,68 @@
<?xml version="1.0"?>
<package format="2">
<name>chassis</name>
<version>0.0.0</version>
<description>The chassis package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="hiwonder@todo.todo">hiwonder</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/chassis</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

View File

@ -0,0 +1,148 @@
#!/usr/bin/env python3
import sys
import time
import math
import signal
import hiwonder
import threading
from PyQt5.QtCore import Qt, QPoint, QRect
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QPainter, QPixmap, QColor,QPen,QFont
# Drive along lines drawn on a canvas
# instantiate the chassis driver
chassis = hiwonder.MecanumChassis()
# heading-angle helper: converts two points into a heading in degrees
def azimuthAngle(p1, p2):
x1 = p1[0]
y1 = -p1[1]
x2 = p2[0]
y2 = -p2[1]
angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
if angle < 0:
angle = 360 + angle
return angle
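# --- Illustrative example (not part of the original file) --------------------
# azimuthAngle works in screen coordinates (y grows downward, hence the sign
# flip) and returns a counter-clockwise heading from the +x axis:
#   azimuthAngle((0, 0), (10, 0))  -> 0.0    (due right)
#   azimuthAngle((0, 0), (0, -10)) -> 90.0   (up on screen)
#   azimuthAngle((0, 0), (-10, 0)) -> 180.0  (due left)
#   azimuthAngle((0, 0), (0, 10))  -> 270.0  (down on screen)
# ------------------------------------------------------------------------------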
class Winform(QWidget):
def __init__(self, parent=None):
super(Winform, self).__init__(parent)
self.setWindowTitle("Drawing Board")
self.pix = QPixmap() # the QPixmap used as the canvas
self.lastPoint = QPoint() # segment start point
self.endPoint = QPoint() # segment end point
self.initUi()
self.pp = QPainter(self.pix)
self.runner = None
self.num = 1
self.st = True
self.coords = []
def initUi(self):
# window size: 800x600
self.resize(800, 600)
# canvas size: 800x600, white background
self.pix = QPixmap(800, 600)
self.pix.fill(Qt.white)
# paintEvent override: all drawing happens here
def paintEvent(self, event):
# draw a line between the previous and current mouse positions
self.pp.setPen(QPen(QColor(0, 0, 0),3))
self.pp.drawLine(self.lastPoint, self.endPoint)
# carry the end point over as the next start point
# so consecutive segments join into a continuous line
self.lastPoint = self.endPoint
painter = QPainter(self)
painter.drawPixmap(0, 0, self.pix) # blit the canvas onto the widget
# mouse press handler
def mousePressEvent(self, event):
# left button: add a waypoint
if event.button() == Qt.LeftButton:
if self.st:
self.lastPoint = event.pos()
self.endPoint = self.lastPoint
self.st = False
else:
self.endPoint = event.pos()
# trigger a repaint
self.update()
x, y = (event.pos().x(), event.pos().y())
self.coords.append((x,y))
self.pp.setPen(QPen(QColor(255, 0, 0),3))
self.pp.drawEllipse(x-7, y-7, 14, 14)
self.pp.drawText(x,y-12,str(self.num))
self.num += 1
self.update()
# right button: start driving the drawn path
elif event.button() == Qt.RightButton:
if self.runner is None:
if len(self.coords) > 0:
self.runner = threading.Thread(target=move, daemon=True)
self.runner.start()
# motion control routine
def move():
#print(form.coords)
form.pp.setPen(QPen(QColor(0, 255, 0),3))
form.pp.drawEllipse(form.coords[0][0]-7, form.coords[0][1]-7, 14, 14)
form.update() # refresh the display
for i in range(len(form.coords)-1): # drive each segment in turn
x1 = form.coords[i][0]
x2 = form.coords[i+1][0]
y1 = form.coords[i][1]
y2 = form.coords[i+1][1]
dx = x2 - x1
dy = y2 - y1
D = int(math.sqrt((dx**2)+(dy**2)))
angle = int(azimuthAngle(form.coords[i], form.coords[i+1])) # compute the heading
print('D:',D, 'angle:',angle)
form.pp.setPen(QPen(QColor(0, 0, 255),3)) # color the active segment blue
form.pp.drawLine(x1,y1, x2,y2)
form.update() # refresh the display
chassis.set_velocity(100,angle,0) # drive the chassis motors
t = D / 80
time.sleep(t)
chassis.set_velocity(0,0,0) # stop
time.sleep(0.02)
form.pp.setPen(QPen(QColor(0, 255, 0),3)) # color completed segments green
form.pp.drawLine(x1,y1, x2,y2)
form.pp.drawEllipse(x2-7, y2-7, 14, 14)
form.update() # refresh the display
form.pp.setPen(QPen(QColor(0, 255, 0),3))
form.pp.drawEllipse(form.coords[-1][0]-7, form.coords[-1][1]-7, 14, 14)
form.update()
print('Finish')
time.sleep(0.8)
form.runner = None
form.coords.clear()
form.pp.eraseRect(QRect(0, 0, 800, 600)) # clear the canvas
form.update()
form.num = 1
form.st = True
# shutdown handler: stop the chassis and exit
def shutdown(signum, frame):
print('shutdown')
chassis.set_velocity(0,0,0)
sys.exit()
signal.signal(signal.SIGINT, shutdown)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = Winform()
form.show()
sys.exit(app.exec_())

Some files were not shown because too many files have changed in this diff.