/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2008 Blender Foundation.
* All rights reserved.
*
*
* Contributor(s): Blender Foundation
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/windowmanager/intern/wm_gesture.c
* \ingroup wm
*/
#include "DNA_screen_types.h"
#include "DNA_vec_types.h"
#include "DNA_userdef_types.h"
#include "DNA_windowmanager_types.h"
#include "MEM_guardedalloc.h"
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_scanfill.h" /* lasso tessellation */
#include "BLI_utildefines.h"
#include "BLI_lasso.h"
#include "BKE_context.h"
#include "WM_api.h"
#include "WM_types.h"
#include "wm.h"
#include "wm_event_system.h"
#include "wm_subwindow.h"
#include "wm_draw.h"
#include "BIF_gl.h"
#include "BIF_glutil.h"
/* context checked on having screen, window and area */
wmGesture *WM_gesture_new(bContext *C, const wmEvent *event, int type)
{
    wmGesture *gesture = MEM_callocN(sizeof(wmGesture), "new gesture");
    wmWindow *window = CTX_wm_window(C);
    ARegion *ar = CTX_wm_region(C);
    int sx, sy;

    BLI_addtail(&window->gesture, gesture);

    gesture->type = type;
    gesture->event_type = event->type;
    gesture->swinid = ar->swinid;  /* means only in area-region context! */

    wm_subwindow_getorigin(window, gesture->swinid, &sx, &sy);

    if (ELEM5(type, WM_GESTURE_RECT, WM_GESTURE_CROSS_RECT, WM_GESTURE_TWEAK,
              WM_GESTURE_CIRCLE, WM_GESTURE_STRAIGHTLINE))
    {
        rcti *rect = MEM_callocN(sizeof(rcti), "gesture rect new");

        gesture->customdata = rect;
        rect->xmin = event->x - sx;
        rect->ymin = event->y - sy;
        if (type == WM_GESTURE_CIRCLE) {
#ifdef GESTURE_MEMORY
            rect->xmax = circle_select_size;
#else
            rect->xmax = 25;  // XXX temp
#endif
        }
        else {
            rect->xmax = event->x - sx;
            rect->ymax = event->y - sy;
        }
    }
    else if (ELEM(type, WM_GESTURE_LINES, WM_GESTURE_LASSO)) {
        short *lasso;
        gesture->customdata = lasso = MEM_callocN(2 * sizeof(short) * WM_LASSO_MIN_POINTS, "lasso points");
        lasso[0] = event->x - sx;
        lasso[1] = event->y - sy;
        gesture->points = 1;
        gesture->size = WM_LASSO_MIN_POINTS;
    }

    return gesture;
}
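
/*
 * Typical lifecycle (sketch, based on the gesture operators in wm_operators.c):
 * a modal operator's invoke() calls WM_gesture_new() and keeps the gesture in
 * op->customdata, its modal() updates the rect or appends lasso points on mouse
 * moves (growing the short-pair buffer beyond WM_LASSO_MIN_POINTS when needed)
 * and calls wm_gesture_tag_redraw(), and on confirm/cancel it calls
 * WM_gesture_end().
 */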
void WM_gesture_end(bContext *C, wmGesture *gesture)
{
    wmWindow *win = CTX_wm_window(C);

    if (win->tweak == gesture)
        win->tweak = NULL;

    BLI_remlink(&win->gesture, gesture);
    MEM_freeN(gesture->customdata);
    if (gesture->userdata) {
        MEM_freeN(gesture->userdata);
    }
    MEM_freeN(gesture);
}

void WM_gestures_remove(bContext *C)
{
    wmWindow *win = CTX_wm_window(C);

    while (win->gesture.first)
        WM_gesture_end(C, win->gesture.first);
}

/* tweak and line gestures */
int wm_gesture_evaluate(wmGesture *gesture)
{
    if (gesture->type == WM_GESTURE_TWEAK) {
        rcti *rect = gesture->customdata;
        int dx = BLI_rcti_size_x(rect);
        int dy = BLI_rcti_size_y(rect);

        if (abs(dx) + abs(dy) > U.tweak_threshold) {
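            /* Map the drag direction onto eight 45-degree sectors: atan2f(dy, dx)
             * lies in (-pi, pi], so 4 * angle / pi rounds to an integer in [-4, 4],
             * where 0 = east, 1 = north-east, ... and +/-4 = west (the fallback). */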
            int theta = iroundf(4.0f * atan2f((float)dy, (float)dx) / (float)M_PI);
            int val = EVT_GESTURE_W;

            if      (theta == 0)  val = EVT_GESTURE_E;
            else if (theta == 1)  val = EVT_GESTURE_NE;
            else if (theta == 2)  val = EVT_GESTURE_N;
            else if (theta == 3)  val = EVT_GESTURE_NW;
            else if (theta == -1) val = EVT_GESTURE_SE;
            else if (theta == -2) val = EVT_GESTURE_S;
            else if (theta == -3) val = EVT_GESTURE_SW;

#if 0
            /* debug */
            if (val == 1) printf("tweak north\n");
            if (val == 2) printf("tweak north-east\n");
            if (val == 3) printf("tweak east\n");
            if (val == 4) printf("tweak south-east\n");
            if (val == 5) printf("tweak south\n");
            if (val == 6) printf("tweak south-west\n");
            if (val == 7) printf("tweak west\n");
            if (val == 8) printf("tweak north-west\n");
#endif
            return val;
        }
    }
    return 0;
}

/* ******************* gesture draw ******************* */
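
/* Note on the drawing convention used below: each gesture outline is drawn twice
 * with complementary stipple patterns (for example 0xCCCC then 0x3333), first in
 * dark grey and then in white, so the dashed outline stays visible on both light
 * and dark backgrounds. */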
static void wm_gesture_draw_rect(wmGesture *gt)
{
    rcti *rect = (rcti *)gt->customdata;

    glEnable(GL_BLEND);
    glColor4f(1.0, 1.0, 1.0, 0.05);
    glBegin(GL_QUADS);
    glVertex2s(rect->xmax, rect->ymin);
    glVertex2s(rect->xmax, rect->ymax);
    glVertex2s(rect->xmin, rect->ymax);
    glVertex2s(rect->xmin, rect->ymin);
    glEnd();
    glDisable(GL_BLEND);

    glEnable(GL_LINE_STIPPLE);
    glColor3ub(96, 96, 96);
    glLineStipple(1, 0xCCCC);
    sdrawbox(rect->xmin, rect->ymin, rect->xmax, rect->ymax);
    glColor3ub(255, 255, 255);
    glLineStipple(1, 0x3333);
    sdrawbox(rect->xmin, rect->ymin, rect->xmax, rect->ymax);
    glDisable(GL_LINE_STIPPLE);
}

static void wm_gesture_draw_line(wmGesture *gt)
{
    rcti *rect = (rcti *)gt->customdata;

    glEnable(GL_LINE_STIPPLE);
    glColor3ub(96, 96, 96);
    glLineStipple(1, 0xAAAA);
    sdrawline(rect->xmin, rect->ymin, rect->xmax, rect->ymax);
    glColor3ub(255, 255, 255);
    glLineStipple(1, 0x5555);
    sdrawline(rect->xmin, rect->ymin, rect->xmax, rect->ymax);
    glDisable(GL_LINE_STIPPLE);
}
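
/* For circle gestures the customdata rect stores the center in (xmin, ymin) and
 * the radius in xmax (see WM_gesture_new); drawing translates to the center and
 * back again rather than using a matrix push/pop. */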
static void wm_gesture_draw_circle(wmGesture *gt)
{
    rcti *rect = (rcti *)gt->customdata;

    glTranslatef((float)rect->xmin, (float)rect->ymin, 0.0f);

    glEnable(GL_BLEND);
    glColor4f(1.0, 1.0, 1.0, 0.05);
    glutil_draw_filled_arc(0.0, M_PI * 2.0, rect->xmax, 40);
    glDisable(GL_BLEND);

    glEnable(GL_LINE_STIPPLE);
    glColor3ub(96, 96, 96);
    glLineStipple(1, 0xAAAA);
    glutil_draw_lined_arc(0.0, M_PI * 2.0, rect->xmax, 40);
    glColor3ub(255, 255, 255);
    glLineStipple(1, 0x5555);
    glutil_draw_lined_arc(0.0, M_PI * 2.0, rect->xmax, 40);
    glDisable(GL_LINE_STIPPLE);

    glTranslatef(-rect->xmin, -rect->ymin, 0.0f);
}

struct LassoFillData {
    unsigned int *px;
    int width;
};

static void draw_filled_lasso_px_cb(int x, int y, void *user_data)
{
    struct LassoFillData *data = user_data;
    unsigned char *col = (unsigned char *)&(data->px[(y * data->width) + x]);
    col[0] = col[1] = col[2] = 0xff;
    col[3] = 0x10;
}
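
/* Fill drawing for lasso select: the polygon is rasterized into a temporary RGBA
 * buffer (one write per covered pixel via the callback above), clipped to the
 * window, and then blended onto the screen with glDrawPixels. */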
static void draw_filled_lasso(wmWindow *win, wmGesture *gt)
{
    short *lasso = (short *)gt->customdata;
    const int tot = gt->points;
    int (*moves)[2] = MEM_mallocN(sizeof(*moves) * (tot + 1), __func__);
    int i;
    rcti rect;
    rcti rect_win;

    for (i = 0; i < tot; i++, lasso += 2) {
        moves[i][0] = lasso[0];
        moves[i][1] = lasso[1];
    }

    BLI_lasso_boundbox(&rect, (const int (*)[2])moves, tot);

    wm_subwindow_getrect(win, gt->swinid, &rect_win);
    BLI_rcti_translate(&rect, rect_win.xmin, rect_win.ymin);
    BLI_rcti_isect(&rect_win, &rect, &rect);
    BLI_rcti_translate(&rect, -rect_win.xmin, -rect_win.ymin);

    /* highly unlikely this will fail, but could crash if (tot == 0) */
    if (BLI_rcti_is_empty(&rect) == false) {
        const int w = BLI_rcti_size_x(&rect);
        const int h = BLI_rcti_size_y(&rect);
        unsigned int *pixel_buf = MEM_callocN(sizeof(*pixel_buf) * w * h, __func__);
        struct LassoFillData lasso_fill_data = {pixel_buf, w};

        fill_poly_v2i_n(
                rect.xmin, rect.ymin, rect.xmax, rect.ymax,
                (const int (*)[2])moves, tot,
                draw_filled_lasso_px_cb, &lasso_fill_data);

        glEnable(GL_BLEND);
        // glColor4f(1.0, 1.0, 1.0, 0.05);
        glRasterPos2f(rect.xmin, rect.ymin);
        glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixel_buf);
        glDisable(GL_BLEND);
        MEM_freeN(pixel_buf);
    }

    MEM_freeN(moves);
}

static void wm_gesture_draw_lasso(wmWindow *win, wmGesture *gt, bool filled)
{
    short *lasso = (short *)gt->customdata;
    int i;

    if (filled) {
        draw_filled_lasso(win, gt);
    }

    glEnable(GL_LINE_STIPPLE);
    glColor3ub(96, 96, 96);
    glLineStipple(1, 0xAAAA);
    glBegin(GL_LINE_STRIP);
    for (i = 0; i < gt->points; i++, lasso += 2)
        glVertex2sv(lasso);
    if (gt->type == WM_GESTURE_LASSO)
        glVertex2sv((short *)gt->customdata);
    glEnd();

    glColor3ub(255, 255, 255);
    glLineStipple(1, 0x5555);
    glBegin(GL_LINE_STRIP);
    lasso = (short *)gt->customdata;
    for (i = 0; i < gt->points; i++, lasso += 2)
        glVertex2sv(lasso);
    if (gt->type == WM_GESTURE_LASSO)
        glVertex2sv((short *)gt->customdata);
    glEnd();

    glDisable(GL_LINE_STIPPLE);
}

static void wm_gesture_draw_cross(wmWindow *win, wmGesture *gt)
{
    rcti *rect = (rcti *)gt->customdata;
    const int winsize_x = WM_window_pixels_x(win);
    const int winsize_y = WM_window_pixels_y(win);

    glEnable(GL_LINE_STIPPLE);
    glColor3ub(96, 96, 96);
    glLineStipple(1, 0xCCCC);
    sdrawline(rect->xmin - winsize_x, rect->ymin, rect->xmin + winsize_x, rect->ymin);
    sdrawline(rect->xmin, rect->ymin - winsize_y, rect->xmin, rect->ymin + winsize_y);

    glColor3ub(255, 255, 255);
    glLineStipple(1, 0x3333);
    sdrawline(rect->xmin - winsize_x, rect->ymin, rect->xmin + winsize_x, rect->ymin);
    sdrawline(rect->xmin, rect->ymin - winsize_y, rect->xmin, rect->ymin + winsize_y);
    glDisable(GL_LINE_STIPPLE);
}

/* called in wm_draw.c */
void wm_gesture_draw(wmWindow *win)
{
    wmGesture *gt = (wmGesture *)win->gesture.first;

    for (; gt; gt = gt->next) {
        /* all in subwindow space */
        wmSubWindowSet(win, gt->swinid);

        if (gt->type == WM_GESTURE_RECT)
            wm_gesture_draw_rect(gt);
//      else if (gt->type == WM_GESTURE_TWEAK)
//          wm_gesture_draw_line(gt);
        else if (gt->type == WM_GESTURE_CIRCLE)
            wm_gesture_draw_circle(gt);
        else if (gt->type == WM_GESTURE_CROSS_RECT) {
            if (gt->mode == 1)
                wm_gesture_draw_rect(gt);
            else
                wm_gesture_draw_cross(win, gt);
        }
        else if (gt->type == WM_GESTURE_LINES)
            wm_gesture_draw_lasso(win, gt, false);
        else if (gt->type == WM_GESTURE_LASSO)
            wm_gesture_draw_lasso(win, gt, true);
        else if (gt->type == WM_GESTURE_STRAIGHTLINE)
            wm_gesture_draw_line(gt);
    }
}
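
/* Note: with the compositing window draw methods, tagging do_draw_gesture
 * redraws only the gesture overlay, so the underlying area regions should not
 * need a full redraw just to show gesture feedback. */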
void wm_gesture_tag_redraw(bContext *C)
{
    wmWindow *win = CTX_wm_window(C);
    bScreen *screen = CTX_wm_screen(C);
    ARegion *ar = CTX_wm_region(C);

    if (screen)
        screen->do_draw_gesture = TRUE;

    wm_tag_redraw_overlay(win, ar);
}