
I have an Android 1.5 application with a GLSurfaceView class that displays a simple square polygon on the screen. How can I move the OpenGL square with a finger?

I want to learn how to add a new feature: moving the square by touching it with a finger. I mean that when the user touches the square and moves the finger, the square should stick to the finger until the finger is released from the screen.

Any tutorial/code sample/help would be appreciated.

My code:

public class MySurfaceView extends GLSurfaceView implements Renderer { 
private Context context; 
private Square square; 
private float xrot;     //X Rotation 
private float yrot;     //Y Rotation 
private float zrot;     //Z Rotation 
private float xspeed;    //X Rotation Speed 
private float yspeed;    //Y Rotation Speed 
private float z = -1.15f;   //Depth along the Z axis 
private float oldX; //previous X value, used for rotation 
private float oldY; //previous Y value, used for rotation 
private final float TOUCH_SCALE = 0.2f;  //needed for the rotation 

//create the matrix grabber object in your initialization code 
private MatrixGrabber mg = new MatrixGrabber();   

private boolean firstTimeDone=false; //true once the application has been initialized 

public MySurfaceView(Context context, Bitmap image) { 
    super(context); 
    this.context = context; 
    setEGLConfigChooser(8, 8, 8, 8, 16, 0); //transparent background 
    getHolder().setFormat(PixelFormat.TRANSLUCENT); //transparent background 
    //Make this class the renderer 
    this.setRenderer(this); 
    //Request focus so the buttons react 
    this.requestFocus(); 
    this.setFocusableInTouchMode(true); 
    square = new Square(image);         
} 

public void onSurfaceCreated(GL10 gl, EGLConfig config) {  
    gl.glDisable(GL10.GL_DITHER);    //dithering OFF 
    gl.glEnable(GL10.GL_TEXTURE_2D);   //Texture Mapping ON 
    gl.glShadeModel(GL10.GL_SMOOTH);   //Smooth Shading 
    gl.glClearDepthf(1.0f);      //Depth Buffer Setup 
    gl.glEnable(GL10.GL_DEPTH_TEST);   //Depth Testing ON 
    gl.glDepthFunc(GL10.GL_LEQUAL); 
    gl.glClearColor(0,0,0,0); //transparent background 
    gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);   
    //Load the square's texture. 
    square.loadGLTexture(gl, this.context); 
} 

public void onDrawFrame(GL10 gl) { 
    //Clear the screen and the depth buffer 
    gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT); 
    gl.glLoadIdentity(); 
    //Drawing 
    gl.glTranslatef(0.0f, 0.0f, z);   //Move z units into the screen 
    gl.glScalef(0.8f, 0.8f, 0.8f);   //Scale down so it fits on the screen 
    //Rotate around the axes. 
    gl.glRotatef(xrot, 1.0f, 0.0f, 0.0f); //X 
    gl.glRotatef(yrot, 0.0f, 1.0f, 0.0f); //Y 
    gl.glRotatef(zrot, 0.0f, 0.0f, 1.0f); //Z 
    //Draw the square 
    square.draw(gl);  
    //Rotation factors. 
    xrot += xspeed; 
    yrot += yspeed;   


    if (!firstTimeDone) 
    {  
     /////////////// NEW CODE FOR SCALING THE AR IMAGE TO THE DESIRED WIDTH /////////////////    
     mg.getCurrentProjection(gl); 
     mg.getCurrentModelView(gl);      
     float[] modelMatrix = mg.mModelView; 
     float[] projMatrix = mg.mProjection;   
     int[] mView = new int[4]; 
     mView[0] = 0; 
     mView[1] = 0; 
     mView[2] = 800; //width 
     mView[3] = 480; //height 
     float[] outputCoords = new float[3]; 
     GLU.gluProject(-1.0f, -1.0f, z, modelMatrix, 0, projMatrix, 0, mView, 0, outputCoords, 0); 

     int i=0; 
     System.out.print(i); 
     // firstTimeDone=true; 
    } 
} 

//If the surface changes, reset the view; I assume this happens when switching between portrait/landscape or when sliding out a physical keyboard on Droid-type phones. 
public void onSurfaceChanged(GL10 gl, int width, int height) { 
    if(height == 0) {      
     height = 1;       
    } 
    gl.glViewport(0, 0, width, height);  //Reset Viewport 
    gl.glMatrixMode(GL10.GL_PROJECTION); //Select Projection Matrix 
    gl.glLoadIdentity();     //Reset Projection Matrix 
    //Window aspect ratio 
    GLU.gluPerspective(gl, 45.0f, (float)width/(float)height, 0.1f, 100.0f); 
    gl.glMatrixMode(GL10.GL_MODELVIEW);  //Select Modelview Matrix 
    gl.glLoadIdentity();     //Reset Modelview Matrix   

} 

public boolean onTouchEvent(MotionEvent event) { 
    float x = event.getX(); 
    float y = event.getY(); 
    switch (event.getAction()) 
    { 
     case MotionEvent.ACTION_MOVE: 
      //Compute the change 
      float dx = x - oldX; 
      float dy = y - oldY; 
      xrot += dy * TOUCH_SCALE; 
      yrot += dx * TOUCH_SCALE; 
      //Log.w("XXXXXX", "ACTION_MOVE_NO_ZOOM"); 
      break; 
    } 
    oldX = x; 
    oldY = y; 
    return true; //The event has been handled 
} 

public void zoomIn(){ 
    z=z+0.2f; 
    if (z>-1.0f) 
     z=-1.0f; 
} 
public void zoomOut(){ 
    z=z-0.2f; 
    if (z<-20.0f) 
     z=-20.0f; 
} 
public void rotateL(){ 
    zrot=zrot+3.0f; 
} 
public void rotateR(){ 
    zrot=zrot-3.0f; 
} 
public void reset() 
{ 
    xrot=0; 
    yrot=0; 
    zrot=0; 
    xspeed=0; 
    yspeed=0; 
    z = -5.0f; 
} 
} 

This is my Square class:

public class Square { 
//Vertex buffer 
private FloatBuffer vertexBuffer; 
//Texture-coordinate buffer 
private FloatBuffer textureBuffer; 
//Texture pointers 
private int[] textures = new int[3]; 
//The item to render 
private Bitmap image; 
//Vertex definitions 

private float vertices[] = 
{ 
    -1.0f, -1.0f, 0.0f,  //Bottom Left 
    1.0f, -1.0f, 0.0f,  //Bottom Right 
    -1.0f, 1.0f, 0.0f,  //Top Left 
    1.0f, 1.0f, 0.0f  //Top Right 
}; 
/* 
private float vertices[] = 
{ 
-0.8f, -0.8f, 0.0f,  //Bottom Left 
0.8f, -0.8f, 0.0f,  //Bottom Right 
-0.8f, 0.8f, 0.0f,  //Top Left 
0.8f, 0.8f, 0.0f 
}; 
*/ 
//Texture (u, v) coordinates  
/* 
private float texture[] = 
{   
    //Mapping coordinates for the vertices 
    0.0f, 0.0f, 
    0.0f, 1.0f, 
    1.0f, 0.0f, 
    1.0f, 1.0f 
}; 
*/ 
private float texture[] = 
{ 
    //Mapping coordinates for the vertices 
    0.0f, 1.0f, 
    1.0f, 1.0f, 
    0.0f, 0.0f, 
    1.0f, 0.0f 
}; 
//Initialize the buffers 
public Square(Bitmap image) { 
    ByteBuffer byteBuf = ByteBuffer.allocateDirect(vertices.length * 4); 
    byteBuf.order(ByteOrder.nativeOrder()); 
    vertexBuffer = byteBuf.asFloatBuffer(); 
    vertexBuffer.put(vertices); 
    vertexBuffer.position(0); 

    byteBuf = ByteBuffer.allocateDirect(texture.length * 4); 
    byteBuf.order(ByteOrder.nativeOrder()); 
    textureBuffer = byteBuf.asFloatBuffer(); 
    textureBuffer.put(texture); 
    textureBuffer.position(0); 

    this.image=image; 
} 
//Draw function 
public void draw(GL10 gl) { 
    gl.glFrontFace(GL10.GL_CCW); 
    //gl.glEnable(GL10.GL_BLEND); 
    //Bind our only previously generated texture in this case 
    gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]); 
    //Point to our vertex buffer 
    gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer); 
    gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer); 
    //Enable vertex buffer 
    gl.glEnableClientState(GL10.GL_VERTEX_ARRAY); 
    gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY); 
    //Draw the vertices as triangle strip 
    gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, vertices.length/3); 
    //Disable the client state before leaving 
    gl.glDisableClientState(GL10.GL_VERTEX_ARRAY); 
    gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY); 
    //gl.glDisable(GL10.GL_BLEND);  
} 
//Texture loading 
public void loadGLTexture(GL10 gl, Context context) { 
    //Generate a texture pointer 
    gl.glGenTextures(1, textures, 0);  
    //and bind it to our array 
    gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[0]); 
    //Create texture filters 
    gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST); 
    gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR); 
    //Other possible texture parameters: GL10.GL_CLAMP_TO_EDGE 
    gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_S, GL10.GL_REPEAT); 
    gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_WRAP_T, GL10.GL_REPEAT);  
    /* 
    String imagePath = "radiocd5.png"; 
    AssetManager mngr = context.getAssets(); 
    InputStream is=null; 
    try { 
     is = mngr.open(imagePath); 
    } catch (IOException e1) { e1.printStackTrace(); } 
    */ 
    //Get the texture from the Android resource directory 
    InputStream is=null; 
    /* 
    if (item.equals("rim")) 
     is = context.getResources().openRawResource(R.drawable.rueda); 
    else if (item.equals("selector")) 
     is = context.getResources().openRawResource(R.drawable.selector); 
    */  
    /* 
    is = context.getResources().openRawResource(resourceId); 
    Bitmap bitmap = null; 
    try { 
     bitmap = BitmapFactory.decodeStream(is); 
    } finally { 
     try { 
      is.close(); 
      is = null; 
     } catch (IOException e) { 
     } 
    } 
    */ 
    Bitmap bitmap =image;  
    //The following code scales down any image larger than 256x256. 
    int newW=bitmap.getWidth(); 
    int newH=bitmap.getHeight(); 
    float fact; 
    if (newH>256 || newW>256) 
    { 
     if (newH>256) 
     { 
      fact=(float)255/(float)newH; //factor to multiply by to fit within the 256 limit 
      newH=(int)(newH*fact); //height reduced by the needed factor 
      newW=(int)(newW*fact); //width reduced by the needed factor 
     } 
     if (newW>256) 
     { 
      fact=(float)255/(float)newW; //factor to multiply by to fit within the 256 limit 
      newH=(int)(newH*fact); //height reduced by the needed factor 
      newW=(int)(newW*fact); //width reduced by the needed factor 
     } 
     bitmap=Bitmap.createScaledBitmap(bitmap, newW, newH, true); 
    }  
    //The following code turns non-power-of-two images into power-of-two (POT) images. 
    //Place the NPOT bitmap inside a POT bitmap so that no white textures appear. 
    int nextPot=256; 
    int h = bitmap.getHeight(); 
    int w = bitmap.getWidth(); 
    int offx=(nextPot-w)/2; //offset from the left, so the image is centered in the new POT image 
    int offy=(nextPot-h)/2; //offset from the top, so the image is centered in the new POT image 
    Bitmap bitmap2 = Bitmap.createBitmap(nextPot, nextPot, Bitmap.Config.ARGB_8888); //creates a transparent bitmap thanks to ARGB_8888 
    Canvas comboImage = new Canvas(bitmap2); 
    comboImage.drawBitmap(bitmap, offx, offy, null); 
    comboImage.save(); 

    //Use Android's GLUtils to specify a two-dimensional texture for our bitmap 
    GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0); 

    //Check whether the GL context is version 1.1 and generate mipmaps via the flag; otherwise, call our own implementation 
    if(gl instanceof GL11) { 
     gl.glTexParameterf(GL11.GL_TEXTURE_2D, GL11.GL_GENERATE_MIPMAP, GL11.GL_TRUE); 
     GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap2, 0); 
    } else { 
     buildMipmap(gl, bitmap2); 
    } 
    //Clean up the bitmaps 
    bitmap.recycle(); 
    bitmap2.recycle(); 
} 
//Our mipmap implementation: scale the original bitmap down by a factor of 2 and assign it as the next mipmap level 
private void buildMipmap(GL10 gl, Bitmap bitmap) { 
    int level = 0; 
    int height = bitmap.getHeight(); 
    int width = bitmap.getWidth(); 
    while(height >= 1 || width >= 1) { 
     GLUtils.texImage2D(GL10.GL_TEXTURE_2D, level, bitmap, 0); 
     if(height == 1 || width == 1) { 
      break; 
     } 
     level++; 
     height /= 2; 
     width /= 2; 
     Bitmap bitmap2 = Bitmap.createScaledBitmap(bitmap, width, height, true); 
     bitmap.recycle(); 
     bitmap = bitmap2; 
    } 
} 
} 

Answers


Have you looked at the Android tutorial code? They have very similar examples for OpenGL ES 1 and 2.

The OpenGL ES 1 tutorial has a section dedicated to handling touch events: http://developer.android.com/resources/tutorials/opengl/opengl-es10.html#touch

So you'll want to modify the addMotion section to use a glTranslatef command instead of glRotatef.
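
As a rough, untested sketch of that change (posX/posY and the 0.01f screen-to-world factor are placeholders of mine, not from the tutorial, and would need tuning to your projection):

private float posX = 0f; //assumed new fields tracking the square's position 
private float posY = 0f; 

public boolean onTouchEvent(MotionEvent event) { 
    float x = event.getX(); 
    float y = event.getY(); 
    if (event.getAction() == MotionEvent.ACTION_MOVE) { 
        posX += (x - oldX) * 0.01f; //scale pixels down to world units 
        posY -= (y - oldY) * 0.01f; //screen Y grows downward, GL Y grows upward 
    } 
    oldX = x; 
    oldY = y; 
    return true; 
} 

//...and in onDrawFrame, instead of the glRotatef calls: 
//gl.glTranslatef(posX, posY, 0f); 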

Edit

It seems you're more interested in coordinate conversion than in object picking. That is, the image should appear wherever you touch the screen (rather than touching and dragging the image, which would imply picking). And your question about winZ makes me think you're trying gluUnProject. If that's the case, you already know your winZ, because you move the camera back from the object through the z variable. Since your z is negative, why not try it?

Assuming you have set up the GLWrapper for your GLSurfaceView in your activity (MatrixTrackingGL is the matrix-tracking wrapper from the Android SDK's API Demos sample):

mGLView.setGLWrapper(new GLWrapper() { 
    public GL wrap(GL gl) { 
        return new MatrixTrackingGL(gl); 
    } 
}); 

Then, in your GLSurfaceView/Renderer subclass...

public float[] unproject(GL10 gl, float x, float y) { 
    mMatrixGrabber.getCurrentState(gl); 
    int[] view = {0, 0, this.getWidth(), this.getHeight()}; 
    float[] pos = new float[4]; 
    float[] result = null; 
    int retval = GLU.gluUnProject(x, y, -z, 
            mMatrixGrabber.mModelView, 0, 
            mMatrixGrabber.mProjection, 0, 
            view, 0, 
            pos, 0); 
    if (retval != GL10.GL_TRUE) { 
        Log.e("unproject", GLU.gluErrorString(retval)); 
    } else { 
        //Perspective divide: gluUnProject returns homogeneous coordinates. 
        result = new float[3]; 
        result[0] = pos[0] / pos[3]; 
        result[1] = pos[1] / pos[3]; 
        result[2] = pos[2] / pos[3]; 
    } 
    return result; 
} 

Then you can modify your touch-event handler to include:

switch (event.getAction()) 
{ 
    case MotionEvent.ACTION_MOVE: 
        //Compute the change 
        float dx = x - oldX; 
        float dy = y - oldY; 
        xrot += dy * TOUCH_SCALE; 
        yrot += dx * TOUCH_SCALE; 
        //Log.w("XXXXXX", "ACTION_MOVE_NO_ZOOM"); 
        touching = true; 
        break; 
    case MotionEvent.ACTION_UP: 
        xrot = 0; 
        yrot = 0; 
        zrot = 0; 
        touching = false; 
        break; 
} 
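
Note that this snippet relies on a touching flag that the original class never declares; a minimal addition (my assumption of the intended field) would be:

private boolean touching = false; //set on ACTION_MOVE, cleared on ACTION_UP 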

And place this next section in your drawing method, before the other translate/scale/rotate calls:

if (touching) { 
    float[] point = unproject(gl, oldX, (this.getHeight() - oldY)); 
    if (point == null) { 
        Log.e("Draw", "No Point"); 
    } else { 
        gl.glTranslatef(point[0], point[1], 0); 
    } 
} 
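
The y coordinate is flipped with this.getHeight() - oldY because Android measures touch positions from the top-left corner of the screen, while OpenGL window coordinates put the origin at the bottom-left.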

Hopefully this gives you the result you want.


Yes, I checked it a few hours ago and it doesn't meet my needs – NullPointerException


What, specifically, doesn't it help you with? It shows how to handle input and modify an OpenGL object's position based on that input. Do you mean you want to drag the object directly? Then your problem is about coordinate conversion, not input handling? If that's the case, you might consider using a 2D orthographic projection for straightforward coordinate conversion (see the sketch below); otherwise you'll have to do some math to compute the position based on the camera frustum depth. – Marc
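
To illustrate the orthographic approach mentioned in the comment above, here is a minimal sketch (mine, not from the thread): set up the projection in onSurfaceChanged so one GL unit equals one pixel, which makes touch coordinates directly usable without unprojection.

public void onSurfaceChanged(GL10 gl, int width, int height) { 
    gl.glViewport(0, 0, width, height); 
    gl.glMatrixMode(GL10.GL_PROJECTION); 
    gl.glLoadIdentity(); 
    //left, right, bottom, top, near, far: a touch at (x, y) maps to world (x, height - y) 
    gl.glOrthof(0, width, 0, height, -1, 1); 
    gl.glMatrixMode(GL10.GL_MODELVIEW); 
    gl.glLoadIdentity(); 
} 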


If you're running into trouble with the conversion, the same issue is covered at the following link: http://stackoverflow.com/questions/7437482/gluunproject-android-opengl-es-1-1-usage – Marc


I would implement an onTouch listener that waits for an ACTION_DOWN. Once it fires, get the finger position with getRawX/getRawY, then redraw the square in OpenGL accordingly. Here is a link to a good tutorial: http://www.zdnet.com/blog/burnette/how-to-use-multi-touch-in-android-2-part-5-implementing-the-drag-gesture/1789?tag=content;siu-container
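
A rough sketch of the pattern described there (dragStartX/dragStartY, squareX/squareY and toWorldUnits are placeholder names of mine; the pixel-to-world conversion depends entirely on your projection):

public boolean onTouchEvent(MotionEvent event) { 
    switch (event.getAction()) { 
        case MotionEvent.ACTION_DOWN: 
            dragStartX = event.getRawX(); //raw screen coordinates, as the answer suggests 
            dragStartY = event.getRawY(); 
            break; 
        case MotionEvent.ACTION_MOVE: 
            squareX += toWorldUnits(event.getRawX() - dragStartX); 
            squareY -= toWorldUnits(event.getRawY() - dragStartY); //screen Y is inverted 
            dragStartX = event.getRawX(); 
            dragStartY = event.getRawY(); 
            requestRender(); //ask the GLSurfaceView to redraw with the new position 
            break; 
    } 
    return true; 
} 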


It's not that easy: OpenGL coordinates are not the same as screen x, y coordinates. And something more helpful than two lines would be appreciated – NullPointerException


Can't you convert the coordinate pair into something OpenGL can use? I know I've used a different graphics library on Android. Getting the screen size is actually quite simple, and you can relate those coordinates to the screen. –


I can't apply that tutorial to my code; he's working with matrices and I'm not, and I'm using a different implementation of the Square class. I'll edit my question to add my code in a few seconds – NullPointerException